
\n\n"},{"code":null,"e":1259,"s":1240,"text":"Start the Exercise"},{"code":null,"e":1292,"s":1259,"text":"We just launchedW3Schools videos"},{"code":null,"e":1334,"s":1292,"text":"Get certifiedby completinga course today!"},{"code":null,"e":1441,"s":1334,"text":"If you want to report an error, or if you want to make a suggestion, do not hesitate to send us an e-mail:"},{"code":null,"e":1460,"s":1441,"text":"help@w3schools.com"}],"string":"[\n {\n \"code\": null,\n \"e\": 63,\n \"s\": 0,\n \"text\": \"The z-index property specifies the \\nstack order of an element.\"\n },\n {\n \"code\": null,\n \"e\": 126,\n \"s\": 63,\n \"text\": \"When elements are positioned, they can overlap other elements.\"\n },\n {\n \"code\": null,\n \"e\": 256,\n \"s\": 126,\n \"text\": \"The z-index property specifies the stack order of an element (which element should be placed in front of, or behind, the others).\"\n },\n {\n \"code\": null,\n \"e\": 312,\n \"s\": 256,\n \"text\": \"An element can have a positive or negative stack order:\"\n },\n {\n \"code\": null,\n \"e\": 386,\n \"s\": 312,\n \"text\": \"Because the image has a z-index of -1, it will be placed behind the text.\"\n },\n {\n \"code\": null,\n \"e\": 593,\n \"s\": 386,\n \"text\": \"Note: z-index only works on positioned elements (position: absolute, \\nposition: relative, position: fixed, or position: sticky) and flex items \\n(elements that are direct children of display: flex elements).\"\n },\n {\n \"code\": null,\n \"e\": 699,\n \"s\": 593,\n \"text\": \"Here we see that an element with greater stack order is always above an element with a lower stack order:\"\n },\n {\n \"code\": null,\n \"e\": 839,\n \"s\": 699,\n \"text\": \"If two positioned elements overlap each other without a z-index \\nspecified, the element defined last in the HTML code will be shown on top.\"\n },\n {\n \"code\": null,\n \"e\": 898,\n \"s\": 839,\n \"text\": \"Same example as above, but here with no z-index specified:\"\n },\n {\n \"code\": null,\n \"e\": 971,\n \"s\": 898,\n \"text\": \"Both the header and the paragraph are positioned at the top of the page.\"\n },\n {\n \"code\": null,\n \"e\": 1032,\n \"s\": 971,\n \"text\": \"Make sure that the header is placed on top of the paragraph.\"\n },\n {\n \"code\": null,\n \"e\": 1240,\n \"s\": 1032,\n \"text\": \"\\n\\n\\n

<h1>This is a heading</h1>\\n<p>This is a paragraph</p>

\\n\\n\"\n },\n {\n \"code\": null,\n \"e\": 1259,\n \"s\": 1240,\n \"text\": \"Start the Exercise\"\n },\n {\n \"code\": null,\n \"e\": 1292,\n \"s\": 1259,\n \"text\": \"We just launchedW3Schools videos\"\n },\n {\n \"code\": null,\n \"e\": 1334,\n \"s\": 1292,\n \"text\": \"Get certifiedby completinga course today!\"\n },\n {\n \"code\": null,\n \"e\": 1441,\n \"s\": 1334,\n \"text\": \"If you want to report an error, or if you want to make a suggestion, do not hesitate to send us an e-mail:\"\n },\n {\n \"code\": null,\n \"e\": 1460,\n \"s\": 1441,\n \"text\": \"help@w3schools.com\"\n }\n]"}}},{"rowIdx":18,"cells":{"title":{"kind":"string","value":"8051 Program to Subtract two 8 Bit numbers"},"text":{"kind":"string","value":"Here we will see how to subtract two 8-bit numbers using this microcontroller. The register A(Accumulator) is used as one operand in the operations. There are seven registers R0 – R7 in different register banks. We can use any of them as the second operand.\nWe are taking two number73H and BDH at location 20H and 21H, After subtracting the result will be stored at location 30H and 31H. \nMOVR0,#20H;set source address 20H to R0\nMOVR1,#30H;set destination address 30H to R1\n\nMOVA,@R0;take the value from source to register A\nMOVR5,A; Move the value from A to R5\nMOVR4,#00H; Clear register R4 to store borrow\n\nINCR0; Point to the next location\nMOVA,@R0; take the value from source to register A\nMOVR3,A; store second byte\nMOVA,R5;get back the first operand\nSUBBA,R3; Subtract R3 from A\n JNCSAVE\n INCR4; Increment R4 to get borrow\n MOVB,R4;Get borrow to register B\n MOV@R1,B; Store the borrow first\n INCR1; Increase R1 to point to the next address\n\nSAVE: MOV@R1,A; Store the result\nHALT: SJMP HALT ;Stop the program\nSo by subtracting 73H –BDH, the result will be B6H. At location 30H, we will get 01H. This indicates that the result is negative. The get the actual value from result B6H, we have to perform 2’s complement operation. After performing 2’s Complement, the result will be -4AH."},"parsed":{"kind":"list like","value":[{"code":null,"e":1320,"s":1062,"text":"Here we will see how to subtract two 8-bit numbers using this microcontroller. The register A(Accumulator) is used as one operand in the operations. There are seven registers R0 – R7 in different register banks. We can use any of them as the second operand."},{"code":null,"e":1452,"s":1320,"text":"We are taking two number73H and BDH at location 20H and 21H, After subtracting the result will be stored at location 30H and 31H. "},{"code":null,"e":2109,"s":1452,"text":"MOVR0,#20H;set source address 20H to R0\nMOVR1,#30H;set destination address 30H to R1\n\nMOVA,@R0;take the value from source to register A\nMOVR5,A; Move the value from A to R5\nMOVR4,#00H; Clear register R4 to store borrow\n\nINCR0; Point to the next location\nMOVA,@R0; take the value from source to register A\nMOVR3,A; store second byte\nMOVA,R5;get back the first operand\nSUBBA,R3; Subtract R3 from A\n JNCSAVE\n INCR4; Increment R4 to get borrow\n MOVB,R4;Get borrow to register B\n MOV@R1,B; Store the borrow first\n INCR1; Increase R1 to point to the next address\n\nSAVE: MOV@R1,A; Store the result\nHALT: SJMP HALT ;Stop the program"},{"code":null,"e":2384,"s":2109,"text":"So by subtracting 73H –BDH, the result will be B6H. At location 30H, we will get 01H. This indicates that the result is negative. The get the actual value from result B6H, we have to perform 2’s complement operation. 
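The same arithmetic can be verified outside the assembler. The short Python sketch below is illustrative only (it is not part of the 8051 program, and the variable names are ours); it mirrors what SUBB and the 2's complement step compute:

# Check the 8051 subtraction 73H - BDH in plain Python
minuend, subtrahend = 0x73, 0xBD

raw = minuend - subtrahend          # -74 in ordinary integer math
low_byte = raw & 0xFF               # what the accumulator holds: B6H
borrow = 1 if raw < 0 else 0        # the borrow stored first: 01H

# 2's complement of the low byte recovers the magnitude: 4AH
magnitude = (~low_byte + 1) & 0xFF

print(hex(low_byte), borrow, hex(magnitude))   # 0xb6 1 0x4a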
After performing 2’s Complement, the result will be -4AH."}],"string":"[\n {\n \"code\": null,\n \"e\": 1320,\n \"s\": 1062,\n \"text\": \"Here we will see how to subtract two 8-bit numbers using this microcontroller. The register A(Accumulator) is used as one operand in the operations. There are seven registers R0 – R7 in different register banks. We can use any of them as the second operand.\"\n },\n {\n \"code\": null,\n \"e\": 1452,\n \"s\": 1320,\n \"text\": \"We are taking two number73H and BDH at location 20H and 21H, After subtracting the result will be stored at location 30H and 31H. \"\n },\n {\n \"code\": null,\n \"e\": 2109,\n \"s\": 1452,\n \"text\": \"MOVR0,#20H;set source address 20H to R0\\nMOVR1,#30H;set destination address 30H to R1\\n\\nMOVA,@R0;take the value from source to register A\\nMOVR5,A; Move the value from A to R5\\nMOVR4,#00H; Clear register R4 to store borrow\\n\\nINCR0; Point to the next location\\nMOVA,@R0; take the value from source to register A\\nMOVR3,A; store second byte\\nMOVA,R5;get back the first operand\\nSUBBA,R3; Subtract R3 from A\\n JNCSAVE\\n INCR4; Increment R4 to get borrow\\n MOVB,R4;Get borrow to register B\\n MOV@R1,B; Store the borrow first\\n INCR1; Increase R1 to point to the next address\\n\\nSAVE: MOV@R1,A; Store the result\\nHALT: SJMP HALT ;Stop the program\"\n },\n {\n \"code\": null,\n \"e\": 2384,\n \"s\": 2109,\n \"text\": \"So by subtracting 73H –BDH, the result will be B6H. At location 30H, we will get 01H. This indicates that the result is negative. The get the actual value from result B6H, we have to perform 2’s complement operation. After performing 2’s Complement, the result will be -4AH.\"\n }\n]"}}},{"rowIdx":19,"cells":{"title":{"kind":"string","value":"LISP - Lambda Functions"},"text":{"kind":"string","value":"At times you may need a function in only one place in your program and the function is so trivial that you may not give it a name, or may not like to store it in the symbol table, and would rather write an unnamed or anonymous function.\nLISP allows you to write anonymous functions that are evaluated only when they are encountered in the program. These functions are called Lambda functions.\nYou can create such functions using the lambda expression. The syntax for the lambda expression is as follows −\n(lambda (parameters) body)\nA lambda form cannot be evaluated and it must appear only where LISP expects to find a function.\nCreate a new source code file named main.lisp and type the following code in it.\n(write ((lambda (a b c x)\n (+ (* a (* x x)) (* b x) c))\n 4 2 9 3)\n)\nWhen you execute the code, it returns the following result −\n51\n\n\n 79 Lectures \n 7 hours \n\n Arnold Higuit\n Print\n Add Notes\n Bookmark this page"},"parsed":{"kind":"list like","value":[{"code":null,"e":2297,"s":2060,"text":"At times you may need a function in only one place in your program and the function is so trivial that you may not give it a name, or may not like to store it in the symbol table, and would rather write an unnamed or anonymous function."},{"code":null,"e":2453,"s":2297,"text":"LISP allows you to write anonymous functions that are evaluated only when they are encountered in the program. These functions are called Lambda functions."},{"code":null,"e":2565,"s":2453,"text":"You can create such functions using the lambda expression. 
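For readers who know Python, its lambda expresses the same idea of an unnamed, inline function. The rough Python parallel below is for comparison only (it is not LISP, and it simply reuses the quadratic example evaluated later in this chapter):

# An anonymous function applied immediately to its arguments
result = (lambda a, b, c, x: a * x * x + b * x + c)(4, 2, 9, 3)
print(result)   # 51, the same value the LISP example prints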
The syntax for the lambda expression is as follows −"},{"code":null,"e":2592,"s":2565,"text":"(lambda (parameters) body)"},{"code":null,"e":2689,"s":2592,"text":"A lambda form cannot be evaluated and it must appear only where LISP expects to find a function."},{"code":null,"e":2770,"s":2689,"text":"Create a new source code file named main.lisp and type the following code in it."},{"code":null,"e":2842,"s":2770,"text":"(write ((lambda (a b c x)\n (+ (* a (* x x)) (* b x) c))\n 4 2 9 3)\n)"},{"code":null,"e":2903,"s":2842,"text":"When you execute the code, it returns the following result −"},{"code":null,"e":2907,"s":2903,"text":"51\n"},{"code":null,"e":2940,"s":2907,"text":"\n 79 Lectures \n 7 hours \n"},{"code":null,"e":2955,"s":2940,"text":" Arnold Higuit"},{"code":null,"e":2962,"s":2955,"text":" Print"},{"code":null,"e":2973,"s":2962,"text":" Add Notes"}],"string":"[\n {\n \"code\": null,\n \"e\": 2297,\n \"s\": 2060,\n \"text\": \"At times you may need a function in only one place in your program and the function is so trivial that you may not give it a name, or may not like to store it in the symbol table, and would rather write an unnamed or anonymous function.\"\n },\n {\n \"code\": null,\n \"e\": 2453,\n \"s\": 2297,\n \"text\": \"LISP allows you to write anonymous functions that are evaluated only when they are encountered in the program. These functions are called Lambda functions.\"\n },\n {\n \"code\": null,\n \"e\": 2565,\n \"s\": 2453,\n \"text\": \"You can create such functions using the lambda expression. The syntax for the lambda expression is as follows −\"\n },\n {\n \"code\": null,\n \"e\": 2592,\n \"s\": 2565,\n \"text\": \"(lambda (parameters) body)\"\n },\n {\n \"code\": null,\n \"e\": 2689,\n \"s\": 2592,\n \"text\": \"A lambda form cannot be evaluated and it must appear only where LISP expects to find a function.\"\n },\n {\n \"code\": null,\n \"e\": 2770,\n \"s\": 2689,\n \"text\": \"Create a new source code file named main.lisp and type the following code in it.\"\n },\n {\n \"code\": null,\n \"e\": 2842,\n \"s\": 2770,\n \"text\": \"(write ((lambda (a b c x)\\n (+ (* a (* x x)) (* b x) c))\\n 4 2 9 3)\\n)\"\n },\n {\n \"code\": null,\n \"e\": 2903,\n \"s\": 2842,\n \"text\": \"When you execute the code, it returns the following result −\"\n },\n {\n \"code\": null,\n \"e\": 2907,\n \"s\": 2903,\n \"text\": \"51\\n\"\n },\n {\n \"code\": null,\n \"e\": 2940,\n \"s\": 2907,\n \"text\": \"\\n 79 Lectures \\n 7 hours \\n\"\n },\n {\n \"code\": null,\n \"e\": 2955,\n \"s\": 2940,\n \"text\": \" Arnold Higuit\"\n },\n {\n \"code\": null,\n \"e\": 2962,\n \"s\": 2955,\n \"text\": \" Print\"\n },\n {\n \"code\": null,\n \"e\": 2973,\n \"s\": 2962,\n \"text\": \" Add Notes\"\n }\n]"}}},{"rowIdx":20,"cells":{"title":{"kind":"string","value":"Check if a File exists in C#"},"text":{"kind":"string","value":"Use the File.exists method in C# to check if a file exits in C# or not.\nFirstly, check whether the file is present in the current directory.\nif (File.Exists(\"MyFile.txt\")) {\n Console.WriteLine(\"The file exists.\");\n}\nAfter that check whether the file exist in a directory or not.\nif (File.Exists(@\"D:\\myfile.txt\")) {\n Console.WriteLine(\"The file exists.\");\n}\nLet us see the complete example to check if a file exists in C#.\n Live Demo\nusing System;\nusing System.IO;\nclass Demo {\n static void Main() {\n if (File.Exists(\"MyFile.txt\")) {\n Console.WriteLine(\"File exists...\");\n } else {\n Console.WriteLine(\"File does not exist in the current 
directory!\");\n }\n if (File.Exists(@\"D:\\myfile.txt\")) {\n Console.WriteLine(\"File exists...\");\n } else {\n Console.WriteLine(\"File does not exist in the D directory!\");\n }\n }\n}\nFile does not exist in the current directory!\nFile does not exist in the D directory!"},"parsed":{"kind":"list like","value":[{"code":null,"e":1134,"s":1062,"text":"Use the File.exists method in C# to check if a file exits in C# or not."},{"code":null,"e":1203,"s":1134,"text":"Firstly, check whether the file is present in the current directory."},{"code":null,"e":1280,"s":1203,"text":"if (File.Exists(\"MyFile.txt\")) {\n Console.WriteLine(\"The file exists.\");\n}"},{"code":null,"e":1343,"s":1280,"text":"After that check whether the file exist in a directory or not."},{"code":null,"e":1424,"s":1343,"text":"if (File.Exists(@\"D:\\myfile.txt\")) {\n Console.WriteLine(\"The file exists.\");\n}"},{"code":null,"e":1489,"s":1424,"text":"Let us see the complete example to check if a file exists in C#."},{"code":null,"e":1500,"s":1489,"text":" Live Demo"},{"code":null,"e":1943,"s":1500,"text":"using System;\nusing System.IO;\nclass Demo {\n static void Main() {\n if (File.Exists(\"MyFile.txt\")) {\n Console.WriteLine(\"File exists...\");\n } else {\n Console.WriteLine(\"File does not exist in the current directory!\");\n }\n if (File.Exists(@\"D:\\myfile.txt\")) {\n Console.WriteLine(\"File exists...\");\n } else {\n Console.WriteLine(\"File does not exist in the D directory!\");\n }\n }\n}"},{"code":null,"e":2029,"s":1943,"text":"File does not exist in the current directory!\nFile does not exist in the D directory!"}],"string":"[\n {\n \"code\": null,\n \"e\": 1134,\n \"s\": 1062,\n \"text\": \"Use the File.exists method in C# to check if a file exits in C# or not.\"\n },\n {\n \"code\": null,\n \"e\": 1203,\n \"s\": 1134,\n \"text\": \"Firstly, check whether the file is present in the current directory.\"\n },\n {\n \"code\": null,\n \"e\": 1280,\n \"s\": 1203,\n \"text\": \"if (File.Exists(\\\"MyFile.txt\\\")) {\\n Console.WriteLine(\\\"The file exists.\\\");\\n}\"\n },\n {\n \"code\": null,\n \"e\": 1343,\n \"s\": 1280,\n \"text\": \"After that check whether the file exist in a directory or not.\"\n },\n {\n \"code\": null,\n \"e\": 1424,\n \"s\": 1343,\n \"text\": \"if (File.Exists(@\\\"D:\\\\myfile.txt\\\")) {\\n Console.WriteLine(\\\"The file exists.\\\");\\n}\"\n },\n {\n \"code\": null,\n \"e\": 1489,\n \"s\": 1424,\n \"text\": \"Let us see the complete example to check if a file exists in C#.\"\n },\n {\n \"code\": null,\n \"e\": 1500,\n \"s\": 1489,\n \"text\": \" Live Demo\"\n },\n {\n \"code\": null,\n \"e\": 1943,\n \"s\": 1500,\n \"text\": \"using System;\\nusing System.IO;\\nclass Demo {\\n static void Main() {\\n if (File.Exists(\\\"MyFile.txt\\\")) {\\n Console.WriteLine(\\\"File exists...\\\");\\n } else {\\n Console.WriteLine(\\\"File does not exist in the current directory!\\\");\\n }\\n if (File.Exists(@\\\"D:\\\\myfile.txt\\\")) {\\n Console.WriteLine(\\\"File exists...\\\");\\n } else {\\n Console.WriteLine(\\\"File does not exist in the D directory!\\\");\\n }\\n }\\n}\"\n },\n {\n \"code\": null,\n \"e\": 2029,\n \"s\": 1943,\n \"text\": \"File does not exist in the current directory!\\nFile does not exist in the D directory!\"\n }\n]"}}},{"rowIdx":21,"cells":{"title":{"kind":"string","value":"override identifier in C++"},"text":{"kind":"string","value":"13 Jun, 2022\nFunction overriding is a redefinition of the base class function in its derived class with the same signature i.e. 
return type and parameters. But there may be situations when a programmer makes a mistake while overriding that function. So, to keep track of such an error, C++11 has come up with the override identifier. If the compiler comes across this identifier, it understands that this is an overridden version of the same class. It will make the compiler check the base class to see if there is a virtual function with this exact signature. And if there is not, the compiler will show an error. \nThe programmer’s intentions can be made clear to the compiler by override. If the override identifier is used with a member function, the compiler makes sure that the member function exists in the base class, and also the compiler restricts the program to compile otherwise.\nLet’s understand through the following example: \nCPP\n// A CPP program without override keyword, here// programmer makes a mistake and it is not caught#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \"I am in base\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \"int a\" void func(int a) { cout << \"I am in derived class\" << endl; }}; // Driver codeint main(){ Base b; derived d; cout << \"Compiled successfully\" << endl; return 0;}\nCompiled successfully\nExplanation: Here, the user intended to override the function func() in the derived class but did a silly mistake and redefined the function with a different signature. Which was not detected by the compiler. However, the program is not actually what the user wanted. So, to get rid of such silly mistakes to be on the safe side, the override identifier can be used. Below is a C++ example to show the use of override identifier in C++.\nCPP\n// A CPP program that uses override keyword so// that any difference in function signature is// caught during compilation#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \"I am in base\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \"int a\" void func(int a) override { cout << \"I am in derived class\" << endl; }}; int main(){ Base b; derived d; cout << \"Compiled successfully\" << endl; return 0;}\nOutput(Error)\nprog.cpp:17:7: error: 'void derived::func(int)'\nmarked 'override', but does not override\n void func(int a) override \n ^\nIn short, it serves the following functions. It helps to check if: \nThere is a method with the same name in the parent class.\nThe method in the parent class is declared as “virtual” which means it was intended to be rewritten.\nThe method in the parent class has the same signature as the method in the subclass.\nThis article is contributed by MAZHAR IMAM KHAN. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. 
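As a closing cross-language note, Python later adopted the same intent-marking idea: the typing.override decorator (PEP 698). A minimal sketch, assuming Python 3.12 or newer; the decorator only records intent at runtime, and a static checker such as mypy then reports the incompatible signature:

from typing import override

class Base:
    def func(self):
        print("I am in base")

class Derived(Base):
    # The same silly mistake: an extra argument changes the
    # signature, and a static type checker flags this override
    @override
    def func(self, a):
        print("I am in derived class")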
Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.\nanshikajain26\nshivamgupta2\ncpp-virtual\nC++\nCPP\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":54,"s":26,"text":"\n13 Jun, 2022"},{"code":null,"e":657,"s":54,"text":"Function overriding is a redefinition of the base class function in its derived class with the same signature i.e. return type and parameters. But there may be situations when a programmer makes a mistake while overriding that function. So, to keep track of such an error, C++11 has come up with the override identifier. If the compiler comes across this identifier, it understands that this is an overridden version of the same class. It will make the compiler check the base class to see if there is a virtual function with this exact signature. And if there is not, the compiler will show an error. "},{"code":null,"e":932,"s":657,"text":"The programmer’s intentions can be made clear to the compiler by override. If the override identifier is used with a member function, the compiler makes sure that the member function exists in the base class, and also the compiler restricts the program to compile otherwise."},{"code":null,"e":981,"s":932,"text":"Let’s understand through the following example: "},{"code":null,"e":985,"s":981,"text":"CPP"},{"code":"// A CPP program without override keyword, here// programmer makes a mistake and it is not caught#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \"I am in base\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \"int a\" void func(int a) { cout << \"I am in derived class\" << endl; }}; // Driver codeint main(){ Base b; derived d; cout << \"Compiled successfully\" << endl; return 0;}","e":1553,"s":985,"text":null},{"code":null,"e":1575,"s":1553,"text":"Compiled successfully"},{"code":null,"e":2012,"s":1575,"text":"Explanation: Here, the user intended to override the function func() in the derived class but did a silly mistake and redefined the function with a different signature. Which was not detected by the compiler. However, the program is not actually what the user wanted. So, to get rid of such silly mistakes to be on the safe side, the override identifier can be used. Below is a C++ example to show the use of override identifier in C++."},{"code":null,"e":2016,"s":2012,"text":"CPP"},{"code":"// A CPP program that uses override keyword so// that any difference in function signature is// caught during compilation#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \"I am in base\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \"int a\" void func(int a) override { cout << \"I am in derived class\" << endl; }}; int main(){ Base b; derived d; cout << \"Compiled successfully\" << endl; return 0;}","e":2603,"s":2016,"text":null},{"code":null,"e":2617,"s":2603,"text":"Output(Error)"},{"code":null,"e":2744,"s":2617,"text":"prog.cpp:17:7: error: 'void derived::func(int)'\nmarked 'override', but does not override\n void func(int a) override \n ^"},{"code":null,"e":2812,"s":2744,"text":"In short, it serves the following functions. 
It helps to check if: "},{"code":null,"e":2870,"s":2812,"text":"There is a method with the same name in the parent class."},{"code":null,"e":2971,"s":2870,"text":"The method in the parent class is declared as “virtual” which means it was intended to be rewritten."},{"code":null,"e":3056,"s":2971,"text":"The method in the parent class has the same signature as the method in the subclass."},{"code":null,"e":3481,"s":3056,"text":"This article is contributed by MAZHAR IMAM KHAN. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above."},{"code":null,"e":3495,"s":3481,"text":"anshikajain26"},{"code":null,"e":3508,"s":3495,"text":"shivamgupta2"},{"code":null,"e":3520,"s":3508,"text":"cpp-virtual"},{"code":null,"e":3524,"s":3520,"text":"C++"},{"code":null,"e":3528,"s":3524,"text":"CPP"}],"string":"[\n {\n \"code\": null,\n \"e\": 54,\n \"s\": 26,\n \"text\": \"\\n13 Jun, 2022\"\n },\n {\n \"code\": null,\n \"e\": 657,\n \"s\": 54,\n \"text\": \"Function overriding is a redefinition of the base class function in its derived class with the same signature i.e. return type and parameters. But there may be situations when a programmer makes a mistake while overriding that function. So, to keep track of such an error, C++11 has come up with the override identifier. If the compiler comes across this identifier, it understands that this is an overridden version of the same class. It will make the compiler check the base class to see if there is a virtual function with this exact signature. And if there is not, the compiler will show an error. \"\n },\n {\n \"code\": null,\n \"e\": 932,\n \"s\": 657,\n \"text\": \"The programmer’s intentions can be made clear to the compiler by override. If the override identifier is used with a member function, the compiler makes sure that the member function exists in the base class, and also the compiler restricts the program to compile otherwise.\"\n },\n {\n \"code\": null,\n \"e\": 981,\n \"s\": 932,\n \"text\": \"Let’s understand through the following example: \"\n },\n {\n \"code\": null,\n \"e\": 985,\n \"s\": 981,\n \"text\": \"CPP\"\n },\n {\n \"code\": \"// A CPP program without override keyword, here// programmer makes a mistake and it is not caught#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \\\"I am in base\\\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \\\"int a\\\" void func(int a) { cout << \\\"I am in derived class\\\" << endl; }}; // Driver codeint main(){ Base b; derived d; cout << \\\"Compiled successfully\\\" << endl; return 0;}\",\n \"e\": 1553,\n \"s\": 985,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 1575,\n \"s\": 1553,\n \"text\": \"Compiled successfully\"\n },\n {\n \"code\": null,\n \"e\": 2012,\n \"s\": 1575,\n \"text\": \"Explanation: Here, the user intended to override the function func() in the derived class but did a silly mistake and redefined the function with a different signature. Which was not detected by the compiler. However, the program is not actually what the user wanted. 
So, to get rid of such silly mistakes to be on the safe side, the override identifier can be used. Below is a C++ example to show the use of override identifier in C++.\"\n },\n {\n \"code\": null,\n \"e\": 2016,\n \"s\": 2012,\n \"text\": \"CPP\"\n },\n {\n \"code\": \"// A CPP program that uses override keyword so// that any difference in function signature is// caught during compilation#include using namespace std; class Base {public: // user wants to override this in // the derived class virtual void func() { cout << \\\"I am in base\\\" << endl; }}; class derived : public Base {public: // did a silly mistake by putting // an argument \\\"int a\\\" void func(int a) override { cout << \\\"I am in derived class\\\" << endl; }}; int main(){ Base b; derived d; cout << \\\"Compiled successfully\\\" << endl; return 0;}\",\n \"e\": 2603,\n \"s\": 2016,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 2617,\n \"s\": 2603,\n \"text\": \"Output(Error)\"\n },\n {\n \"code\": null,\n \"e\": 2744,\n \"s\": 2617,\n \"text\": \"prog.cpp:17:7: error: 'void derived::func(int)'\\nmarked 'override', but does not override\\n void func(int a) override \\n ^\"\n },\n {\n \"code\": null,\n \"e\": 2812,\n \"s\": 2744,\n \"text\": \"In short, it serves the following functions. It helps to check if: \"\n },\n {\n \"code\": null,\n \"e\": 2870,\n \"s\": 2812,\n \"text\": \"There is a method with the same name in the parent class.\"\n },\n {\n \"code\": null,\n \"e\": 2971,\n \"s\": 2870,\n \"text\": \"The method in the parent class is declared as “virtual” which means it was intended to be rewritten.\"\n },\n {\n \"code\": null,\n \"e\": 3056,\n \"s\": 2971,\n \"text\": \"The method in the parent class has the same signature as the method in the subclass.\"\n },\n {\n \"code\": null,\n \"e\": 3481,\n \"s\": 3056,\n \"text\": \"This article is contributed by MAZHAR IMAM KHAN. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.\"\n },\n {\n \"code\": null,\n \"e\": 3495,\n \"s\": 3481,\n \"text\": \"anshikajain26\"\n },\n {\n \"code\": null,\n \"e\": 3508,\n \"s\": 3495,\n \"text\": \"shivamgupta2\"\n },\n {\n \"code\": null,\n \"e\": 3520,\n \"s\": 3508,\n \"text\": \"cpp-virtual\"\n },\n {\n \"code\": null,\n \"e\": 3524,\n \"s\": 3520,\n \"text\": \"C++\"\n },\n {\n \"code\": null,\n \"e\": 3528,\n \"s\": 3524,\n \"text\": \"CPP\"\n }\n]"}}},{"rowIdx":22,"cells":{"title":{"kind":"string","value":"PLSQL | UPPER Function"},"text":{"kind":"string","value":"11 Oct, 2019\nThe PLSQL UPPER function is used for converting all letters in the specified string to uppercase. If there are characters in the string that are not letters, they are unaffected by this function.\nThe char to be converted can be any of the datatypes such as CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB. The value returned by the UPPER function is the same datatype as char. 
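For a quick feel of the letters-only behaviour, the Python analogue below is illustrative only (PL/SQL's UPPER runs inside the database, not in Python): non-letter characters pass through unchanged while letters are converted.

print("Geeksforgeeks".upper())           # GEEKSFORGEEKS
print("123geeksforgeeks123".upper())     # 123GEEKSFORGEEKS123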
The database sets the case of the characters based on the binary mapping defined for the underlying character set.\nSyntax:\nUPPER( string )\nParameters Used:\nstring – It is used to specify the string which needs to be converted.\nReturn Value:The UPPER function in PLSQL returns a string value.\nSupported Versions of Oracle/PLSQL\nOracle 12cOracle 11gOracle 10gOracle 9iOracle 8i\nOracle 12c\nOracle 11g\nOracle 10g\nOracle 9i\nOracle 8i\nExample-1: Passing a string as an argument with first character in uppercase and rest of the characters in lowercase.\nDECLARE \n Test_String string(20) := 'Geeksforgeeks';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; \nOutput:\nGEEKSFORGEEKS \nExample-2: Passing a string as an argument with all the characters in lowercase.\nDECLARE \n Test_String string(20) := 'geeksforgeeks';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; \nOutput:\nGEEKSFORGEEKS \nExample-3: Passing a string as an argument with numeric values and characters in lowercase.\nDECLARE \n Test_String string(20) := '123geeksforgeeks123';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; \nOutput:\n123GEEKSFORGEEKS123 \nAdvantage:The UPPER function accepts any of the datatypes CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB in the input_string.\nSQL-PL/SQL\nSQL\nSQL\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":28,"s":0,"text":"\n11 Oct, 2019"},{"code":null,"e":224,"s":28,"text":"The PLSQL UPPER function is used for converting all letters in the specified string to uppercase. If there are characters in the string that are not letters, they are unaffected by this function."},{"code":null,"e":521,"s":224,"text":"The char to be converted can be any of the datatypes such as CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB. The value returned by the UPPER function is the same datatype as char. 
The database sets the case of the characters based on the binary mapping defined for the underlying character set."},{"code":null,"e":529,"s":521,"text":"Syntax:"},{"code":null,"e":545,"s":529,"text":"UPPER( string )"},{"code":null,"e":562,"s":545,"text":"Parameters Used:"},{"code":null,"e":633,"s":562,"text":"string – It is used to specify the string which needs to be converted."},{"code":null,"e":698,"s":633,"text":"Return Value:The UPPER function in PLSQL returns a string value."},{"code":null,"e":733,"s":698,"text":"Supported Versions of Oracle/PLSQL"},{"code":null,"e":782,"s":733,"text":"Oracle 12cOracle 11gOracle 10gOracle 9iOracle 8i"},{"code":null,"e":793,"s":782,"text":"Oracle 12c"},{"code":null,"e":804,"s":793,"text":"Oracle 11g"},{"code":null,"e":815,"s":804,"text":"Oracle 10g"},{"code":null,"e":825,"s":815,"text":"Oracle 9i"},{"code":null,"e":835,"s":825,"text":"Oracle 8i"},{"code":null,"e":953,"s":835,"text":"Example-1: Passing a string as an argument with first character in uppercase and rest of the characters in lowercase."},{"code":null,"e":1076,"s":953,"text":"DECLARE \n Test_String string(20) := 'Geeksforgeeks';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; "},{"code":null,"e":1084,"s":1076,"text":"Output:"},{"code":null,"e":1099,"s":1084,"text":"GEEKSFORGEEKS "},{"code":null,"e":1180,"s":1099,"text":"Example-2: Passing a string as an argument with all the characters in lowercase."},{"code":null,"e":1304,"s":1180,"text":"DECLARE \n Test_String string(20) := 'geeksforgeeks';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; "},{"code":null,"e":1312,"s":1304,"text":"Output:"},{"code":null,"e":1327,"s":1312,"text":"GEEKSFORGEEKS "},{"code":null,"e":1419,"s":1327,"text":"Example-3: Passing a string as an argument with numeric values and characters in lowercase."},{"code":null,"e":1547,"s":1419,"text":"DECLARE \n Test_String string(20) := '123geeksforgeeks123';\n \nBEGIN \n dbms_output.put_line(UPPER(Test_String)); \n \nEND; "},{"code":null,"e":1555,"s":1547,"text":"Output:"},{"code":null,"e":1576,"s":1555,"text":"123GEEKSFORGEEKS123 "},{"code":null,"e":1704,"s":1576,"text":"Advantage:The UPPER function accepts any of the datatypes CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB in the input_string."},{"code":null,"e":1715,"s":1704,"text":"SQL-PL/SQL"},{"code":null,"e":1719,"s":1715,"text":"SQL"},{"code":null,"e":1723,"s":1719,"text":"SQL"}],"string":"[\n {\n \"code\": null,\n \"e\": 28,\n \"s\": 0,\n \"text\": \"\\n11 Oct, 2019\"\n },\n {\n \"code\": null,\n \"e\": 224,\n \"s\": 28,\n \"text\": \"The PLSQL UPPER function is used for converting all letters in the specified string to uppercase. If there are characters in the string that are not letters, they are unaffected by this function.\"\n },\n {\n \"code\": null,\n \"e\": 521,\n \"s\": 224,\n \"text\": \"The char to be converted can be any of the datatypes such as CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB. The value returned by the UPPER function is the same datatype as char. 
The database sets the case of the characters based on the binary mapping defined for the underlying character set.\"\n },\n {\n \"code\": null,\n \"e\": 529,\n \"s\": 521,\n \"text\": \"Syntax:\"\n },\n {\n \"code\": null,\n \"e\": 545,\n \"s\": 529,\n \"text\": \"UPPER( string )\"\n },\n {\n \"code\": null,\n \"e\": 562,\n \"s\": 545,\n \"text\": \"Parameters Used:\"\n },\n {\n \"code\": null,\n \"e\": 633,\n \"s\": 562,\n \"text\": \"string – It is used to specify the string which needs to be converted.\"\n },\n {\n \"code\": null,\n \"e\": 698,\n \"s\": 633,\n \"text\": \"Return Value:The UPPER function in PLSQL returns a string value.\"\n },\n {\n \"code\": null,\n \"e\": 733,\n \"s\": 698,\n \"text\": \"Supported Versions of Oracle/PLSQL\"\n },\n {\n \"code\": null,\n \"e\": 782,\n \"s\": 733,\n \"text\": \"Oracle 12cOracle 11gOracle 10gOracle 9iOracle 8i\"\n },\n {\n \"code\": null,\n \"e\": 793,\n \"s\": 782,\n \"text\": \"Oracle 12c\"\n },\n {\n \"code\": null,\n \"e\": 804,\n \"s\": 793,\n \"text\": \"Oracle 11g\"\n },\n {\n \"code\": null,\n \"e\": 815,\n \"s\": 804,\n \"text\": \"Oracle 10g\"\n },\n {\n \"code\": null,\n \"e\": 825,\n \"s\": 815,\n \"text\": \"Oracle 9i\"\n },\n {\n \"code\": null,\n \"e\": 835,\n \"s\": 825,\n \"text\": \"Oracle 8i\"\n },\n {\n \"code\": null,\n \"e\": 953,\n \"s\": 835,\n \"text\": \"Example-1: Passing a string as an argument with first character in uppercase and rest of the characters in lowercase.\"\n },\n {\n \"code\": null,\n \"e\": 1076,\n \"s\": 953,\n \"text\": \"DECLARE \\n Test_String string(20) := 'Geeksforgeeks';\\n \\nBEGIN \\n dbms_output.put_line(UPPER(Test_String)); \\n \\nEND; \"\n },\n {\n \"code\": null,\n \"e\": 1084,\n \"s\": 1076,\n \"text\": \"Output:\"\n },\n {\n \"code\": null,\n \"e\": 1099,\n \"s\": 1084,\n \"text\": \"GEEKSFORGEEKS \"\n },\n {\n \"code\": null,\n \"e\": 1180,\n \"s\": 1099,\n \"text\": \"Example-2: Passing a string as an argument with all the characters in lowercase.\"\n },\n {\n \"code\": null,\n \"e\": 1304,\n \"s\": 1180,\n \"text\": \"DECLARE \\n Test_String string(20) := 'geeksforgeeks';\\n \\nBEGIN \\n dbms_output.put_line(UPPER(Test_String)); \\n \\nEND; \"\n },\n {\n \"code\": null,\n \"e\": 1312,\n \"s\": 1304,\n \"text\": \"Output:\"\n },\n {\n \"code\": null,\n \"e\": 1327,\n \"s\": 1312,\n \"text\": \"GEEKSFORGEEKS \"\n },\n {\n \"code\": null,\n \"e\": 1419,\n \"s\": 1327,\n \"text\": \"Example-3: Passing a string as an argument with numeric values and characters in lowercase.\"\n },\n {\n \"code\": null,\n \"e\": 1547,\n \"s\": 1419,\n \"text\": \"DECLARE \\n Test_String string(20) := '123geeksforgeeks123';\\n \\nBEGIN \\n dbms_output.put_line(UPPER(Test_String)); \\n \\nEND; \"\n },\n {\n \"code\": null,\n \"e\": 1555,\n \"s\": 1547,\n \"text\": \"Output:\"\n },\n {\n \"code\": null,\n \"e\": 1576,\n \"s\": 1555,\n \"text\": \"123GEEKSFORGEEKS123 \"\n },\n {\n \"code\": null,\n \"e\": 1704,\n \"s\": 1576,\n \"text\": \"Advantage:The UPPER function accepts any of the datatypes CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB in the input_string.\"\n },\n {\n \"code\": null,\n \"e\": 1715,\n \"s\": 1704,\n \"text\": \"SQL-PL/SQL\"\n },\n {\n \"code\": null,\n \"e\": 1719,\n \"s\": 1715,\n \"text\": \"SQL\"\n },\n {\n \"code\": null,\n \"e\": 1723,\n \"s\": 1719,\n \"text\": \"SQL\"\n }\n]"}}},{"rowIdx":23,"cells":{"title":{"kind":"string","value":"How to Solve java.lang.NoSuchMethodError in Java?"},"text":{"kind":"string","value":"28 Jul, 2021\nA java.lang.NoSuchMethodError as the name 
suggests, is a runtime error in Java which occurs when a method is called that exists at compile-time, but does not exist at runtime. The java.lang.NoSuchMethodError can occur in case application code is partially compiled, or in case an external dependency in a project incompatibly changed the code (e.g. removed the calling method) from one version to another. It is as shown in the illustration below as follows:\nIllustration:\njava.lang\nClass NoSuchMethodError\n java.lang.Object\n java.lang.Throwable\n java.lang.Error\n java.lang.LinkageError\n java.lang.IncompatibleClassChangeError\n java.lang.NoSuchMethodError\nNote: All Implemented Interfaces is Serializable interface in Java.\nNow let us discuss the causes behind this exception in order to figure out how to resolve the same problem. java.lang. It occurs when a particular method is not found. This method can either be an instance method or a static method. The java.lang.NoSuchMethodError occurs when an application does not find a method at runtime. In most cases, we’re able to catch this error at compile-time. Hence, it’s not a big issue. However, sometimes it could be thrown at runtime, then finding it becomes a bit difficult. According to the Oracle documentation, this error may occur at runtime if a class has been incomparably changed. Hence, we may encounter this error in the following cases. Firstly, if we do just a partial recompilation of our code. Secondly, if there is version incompatibility with the dependencies in our application, such as the external jars.\nNote: NoSuchMethodError inheritance tree includes IncompatibleClassChangeError and LinkageError. These errors are associated with an incompatible class change after compilation.\nImplementation:\nNow we will be proposing two examples in which first we will illustrate the thrown exception and, in later example, resolve the same via clean java problems.\nExample 1\nJava\n// Java Program to Demonstrate NoSuchMethodError by // throwing it due to a breaking change // introduced within an application // Importingn I/O classesimport java.io.*; // Class 1// Helper classclass NoSuchMethodError { // Method 1 // Void demo method created to be called // in another class containing main() method public void printer(String myString) { // Print statement System.out.println(myString); }} // Class 2// Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating object of class 1 NoSuchMethodError obj = new NoSuchMethodError(); // Now calling print() method which is not present // in NoSuchMethodErrorExample class, hence throwing // exception obj.print(\"Hello World\"); }}\nOutput:\nNow if we try to draw out conclusions about the possible solution to resolve the above error. 
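In Python terms, the closest runtime analogue is AttributeError, raised the moment the missing method is looked up. The sketch below is a cross-language illustration only, and the Printer class in it is hypothetical:

class Printer:
    def printer(self, my_string):
        print(my_string)

obj = Printer()
obj.printer("Hello World")   # prints: Hello World
obj.print("Hello World")     # AttributeError: 'Printer' object
                             # has no attribute 'print'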
For that, we need to take care of two parameters as listed: \nCall correct method which is present in class.\nCheck the name of the method and its signature which you are trying to call.\nExample 2\nJava\n// Java Program to Resolve NoSuchMethodError // Importing input output classesimport java.io.*; // Class 1// Helper classclass NoSuchMethodError { // Defined printer method public void printer(String myString) { // Print the string which will be passed // in the main() method System.out.println(myString); }} // Class 2// Main Classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating object of above class in // main() method of this class NoSuchMethodError obj = new NoSuchMethodError(); // Calling printer() method which is present in // NoSuchMethodErrorExample class obj.printer(\"Hello World\"); }}\nHello World\n\nJava-Exception Handling\nPicked\nJava\nJava\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":53,"s":25,"text":"\n28 Jul, 2021"},{"code":null,"e":512,"s":53,"text":"A java.lang.NoSuchMethodError as the name suggests, is a runtime error in Java which occurs when a method is called that exists at compile-time, but does not exist at runtime. The java.lang.NoSuchMethodError can occur in case application code is partially compiled, or in case an external dependency in a project incompatibly changed the code (e.g. removed the calling method) from one version to another. It is as shown in the illustration below as follows:"},{"code":null,"e":526,"s":512,"text":"Illustration:"},{"code":null,"e":787,"s":526,"text":"java.lang\nClass NoSuchMethodError\n java.lang.Object\n java.lang.Throwable\n java.lang.Error\n java.lang.LinkageError\n java.lang.IncompatibleClassChangeError\n java.lang.NoSuchMethodError"},{"code":null,"e":855,"s":787,"text":"Note: All Implemented Interfaces is Serializable interface in Java."},{"code":null,"e":1712,"s":855,"text":"Now let us discuss the causes behind this exception in order to figure out how to resolve the same problem. java.lang. It occurs when a particular method is not found. This method can either be an instance method or a static method. The java.lang.NoSuchMethodError occurs when an application does not find a method at runtime. In most cases, we’re able to catch this error at compile-time. Hence, it’s not a big issue. However, sometimes it could be thrown at runtime, then finding it becomes a bit difficult. According to the Oracle documentation, this error may occur at runtime if a class has been incomparably changed. Hence, we may encounter this error in the following cases. Firstly, if we do just a partial recompilation of our code. Secondly, if there is version incompatibility with the dependencies in our application, such as the external jars."},{"code":null,"e":1890,"s":1712,"text":"Note: NoSuchMethodError inheritance tree includes IncompatibleClassChangeError and LinkageError. 
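A quick supplement to the discussion above (an addition, not part of the original article): when it is unclear whether a method with a given name and signature exists on a class at runtime, reflection can probe for it before calling, turning the hard-to-debug NoSuchMethodError into an explicit, catchable condition. The class and method names below are hypothetical.

Java

// Hypothetical helper: probe for a method reflectively instead of
// letting a stale call site fail with NoSuchMethodError.
import java.lang.reflect.Method;

public class MethodProbe {
    public static void main(String[] args)
    {
        try {
            // looks up printer(String) on the helper class;
            // throws NoSuchMethodException if it is absent
            Method m = Printer.class.getMethod("printer",
                                               String.class);
            m.invoke(new Printer(), "Hello World");
        }
        catch (ReflectiveOperationException e) {
            System.out.println("Method missing or call failed: " + e);
        }
    }
}

class Printer {
    public void printer(String s) { System.out.println(s); }
}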
How to get list of parameters name from a function in Python?

29 Dec, 2020
In this article, we are going to discuss how to get the list of parameter names of a function in Python. The inspect module helps in examining the objects present in the code that we have written. We are going to use two of its methods, signature() and getargspec(), to get the list of parameter names of a function or method passed as an argument to one of them. Note that getargspec() has been deprecated since Python 3.0 (and was removed in Python 3.11), so signature() is the preferred method on modern interpreters.

Below are some programs which depict how to use the signature() method of the inspect module to get the list of parameter names:

Example 1: Getting the parameter list of a method.

Python3

# import required modules
import inspect
import collections

# use signature()
print(inspect.signature(collections.Counter))

Output:

(*args, **kwds)

Example 2: Getting the parameter list of an explicit function.

Python3

# explicit function
def fun(a, b):
    return a**b

# import required modules
import inspect

# use signature()
print(inspect.signature(fun))

Output:

(a, b)

Example 3: Getting the parameter list of an in-built function.

Python3

# import required modules
import inspect

# use signature()
print(inspect.signature(len))

Output:

(obj, /)

Below are some programs which depict how to use the getargspec() method of the inspect module to get the list of parameter names:

Example 1: Getting the parameter list of a method.

Python3

# import required modules
import inspect
import collections

# use getargspec()
print(inspect.getargspec(collections.Counter))

Output:

ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)

Example 2: Getting the parameter list of an explicit function.

Python3

# explicit function
def fun(a, b):
    return a**b

# import required modules
import inspect

# use getargspec()
print(inspect.getargspec(fun))

Output:

ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)

Example 3: Getting the parameter list of an in-built function.

Python3

# import required modules
import inspect

# use getargspec()
print(inspect.getargspec(len))

Output:

ArgSpec(args=['obj'], varargs=None, keywords=None, defaults=None)
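The examples above print the whole signature; if only the parameter names are needed as an actual Python list, the Signature object returned by signature() can be queried directly. The following sketch is an addition to the article (it reuses the fun() helper defined above) and also shows getfullargspec(), the maintained replacement for the deprecated getargspec():

Python3

# Addition to the article: signature() returns a Signature object
# whose .parameters mapping is keyed by parameter name, so the
# names can be pulled out as a plain list.
import inspect

def fun(a, b):
    return a**b

sig = inspect.signature(fun)

# list of parameter names only
print(list(sig.parameters.keys()))      # ['a', 'b']

# getfullargspec() is the maintained replacement
# for the deprecated getargspec()
print(inspect.getfullargspec(fun).args)  # ['a', 'b']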
settextstyle function in C

01 Dec, 2021
The header file graphics.h contains the settextstyle() function, which is used to change the way in which text appears. Using it we can modify the size of the text, change the direction of the text, and change the font of the text.

Syntax:

void settextstyle(int font, int direction, int font_size);

where,
font argument specifies the font of the text,
direction can be HORIZ_DIR (left to right) or VERT_DIR (bottom to top).

Examples (the rendered text for each input appeared as an image in the original article):

Input : font = 8, direction = 0, font_size = 5
Input : font = 3, direction = 0, font_size = 5

The original article showed the fonts with their INT values in a table; the standard BGI constants defined in graphics.h are: DEFAULT_FONT (0), TRIPLEX_FONT (1), SMALL_FONT (2), SANS_SERIF_FONT (3), GOTHIC_FONT (4), SCRIPT_FONT (5), SIMPLEX_FONT (6), TRIPLEX_SCR_FONT (7), COMPLEX_FONT (8), EUROPEAN_FONT (9), BOLD_FONT (10).

Below is the implementation of the settextstyle() function:

CPP

// C++ implementation for
// settextstyle() function
#include <graphics.h>

// driver code
int main()
{
    // gm is Graphics mode which is
    // a computer display mode that
    // generates images using pixels.
    // DETECT is a macro defined in
    // "graphics.h" header file
    int gd = DETECT, gm;

    // initgraph initializes the
    // graphics system by loading
    // a graphics driver from disk
    initgraph(&gd, &gm, "");

    // location of text
    int x = 150;
    int y = 150;

    // font style
    int font = 8;

    // font direction
    int direction = 0;

    // font size
    int font_size = 5;

    // for setting text style
    settextstyle(font, direction, font_size);

    // for printing text in graphics window
    outtextxy(x, y, "Geeks For Geeks");

    getch();

    // closegraph function closes the
    // graphics mode and deallocates
    // all memory allocated by the
    // graphics system
    closegraph();

    return 0;
}

Output: the string "Geeks For Geeks" is drawn at (150, 150) in the graphics window using font 8 (COMPLEX_FONT) at five times the default size.
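The direction parameter is the one most often tripped over. The sketch below is an assumed variation on the program above (not from the original article); it draws the same string vertically by passing the VERT_DIR constant instead of the numeric 0 used earlier:

CPP

// Hypothetical variation: same program, but the text is drawn
// bottom-to-top by using the VERT_DIR constant from graphics.h
#include <graphics.h>

int main()
{
    int gd = DETECT, gm;
    initgraph(&gd, &gm, "");

    // SANS_SERIF_FONT (3), drawn vertically, size 3
    settextstyle(3, VERT_DIR, 3);

    // the string now grows upward from (150, 300)
    outtextxy(150, 300, "Geeks For Geeks");

    getch();
    closegraph();
    return 0;
}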
Python – Convert Snake Case String to Camel Case

01 Oct, 2020
Given a snake case string, convert it to camel case.

Input : test_str = 'geeksforgeeks_is_best_for_geeks'
Output : geeksforgeeksIsBestForGeeks
Explanation : String converted to Camel Case.

Input : test_str = 'geeksforgeeks_best_for_geeks'
Output : geeksforgeeksBestForGeeks
Explanation : String converted to Camel Case.

Method #1 : Using split() + join() + title() + generator expression

The combination of the above functions can be used to solve this problem. In this, we first split on all underscores, and then join the string back, keeping the initial word unchanged and appending the remaining words title-cased via a generator expression and title().

Python3

# Python3 code to demonstrate working of
# Convert Snake Case String to Camel Case
# Using split() + join() + title() + generator expression

# initializing string
test_str = 'geeksforgeeks_is_best'

# printing original string
print("The original string is : " + str(test_str))

# split on underscore using split()
temp = test_str.split('_')

# joining result
res = temp[0] + ''.join(ele.title() for ele in temp[1:])

# printing result
print("The camel case string is : " + str(res))

Output:

The original string is : geeksforgeeks_is_best
The camel case string is : geeksforgeeksIsBest

Method #2 : Using split() + join() + title() + map()

The combination of the above functions can be used to solve this problem. In this, we perform the same task, but extend the title-casing over all words after the first using map().

Python3

# Python3 code to demonstrate working of
# Convert Snake Case String to Camel Case
# Using split() + join() + title() + map()

# initializing string
test_str = 'geeksforgeeks_is_best'

# printing original string
print("The original string is : " + str(test_str))

# saving first and rest using split()
init, *temp = test_str.split('_')

# using map() to get all words other than the 1st
# and title-casing them
res = ''.join([init.lower(), *map(str.title, temp)])

# printing result
print("The camel case string is : " + str(res))

Output:

The original string is : geeksforgeeks_is_best
The camel case string is : geeksforgeeksIsBest
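For completeness, the same conversion is often written as a single regular-expression substitution. This variant is an addition to the original article; it assumes the words are lower-case and separated by single underscores:

Python3

# Assumed third approach (not in the original article):
# re.sub() replaces every '_x' pair with the upper-cased 'X'.
import re

test_str = 'geeksforgeeks_is_best'

res = re.sub(r'_([a-z])', lambda m: m.group(1).upper(), test_str)

print("The camel case string is : " + res)
# The camel case string is : geeksforgeeksIsBest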
"},{"code":null,"e":414,"s":346,"text":"Method #1 : Using split() + join() + title() + generator expression"},{"code":null,"e":644,"s":414,"text":"The combination of above functions can be used to solve this problem. In this, we first split all underscores, and then join the string appending initial word, followed by title cased words using generator expression and title()."},{"code":null,"e":652,"s":644,"text":"Python3"},{"code":"# Python3 code to demonstrate working of # Convert Snake Case String to Camel Case# Using split() + join() + title() + generator expression # initializing stringtest_str = 'geeksforgeeks_is_best' # printing original stringprint(\"The original string is : \" + str(test_str)) # split underscore using splittemp = test_str.split('_') # joining result res = temp[0] + ''.join(ele.title() for ele in temp[1:]) # printing result print(\"The camel case string is : \" + str(res)) ","e":1132,"s":652,"text":null},{"code":null,"e":1227,"s":1132,"text":"The original string is : geeksforgeeks_is_best\nThe camel case string is : geeksforgeeksIsBest\n"},{"code":null,"e":1282,"s":1227,"text":"Method #2 : Using split() + join() + title() + map()"},{"code":null,"e":1432,"s":1282,"text":"The combination of above functions can be used to solve this problem. In this, we perform the task of extending logic to entire strings using map(). "},{"code":null,"e":1440,"s":1432,"text":"Python3"},{"code":"# Python3 code to demonstrate working of # Convert Snake Case String to Camel Case# Using split() + join() + title() + map() # initializing stringtest_str = 'geeksforgeeks_is_best' # printing original stringprint(\"The original string is : \" + str(test_str)) # saving first and rest using split()init, *temp = test_str.split('_') # using map() to get all words other than 1st# and titlecasing themres = ''.join([init.lower(), *map(str.title, temp)]) # printing result print(\"The camel case string is : \" + str(res)) ","e":1965,"s":1440,"text":null},{"code":null,"e":2060,"s":1965,"text":"The original string is : geeksforgeeks_is_best\nThe camel case string is : geeksforgeeksIsBest\n"},{"code":null,"e":2083,"s":2060,"text":"Python string-programs"},{"code":null,"e":2090,"s":2083,"text":"Python"},{"code":null,"e":2106,"s":2090,"text":"Python Programs"}],"string":"[\n {\n \"code\": null,\n \"e\": 28,\n \"s\": 0,\n \"text\": \"\\n01 Oct, 2020\"\n },\n {\n \"code\": null,\n \"e\": 78,\n \"s\": 28,\n \"text\": \"Given a snake case string, convert to camel case.\"\n },\n {\n \"code\": null,\n \"e\": 214,\n \"s\": 78,\n \"text\": \"Input : test_str = ‘geeksforgeeks_is_best_for_geeks’ Output : geeksforgeeksIsBestForGeeks Explanation : String converted to Camel Case.\"\n },\n {\n \"code\": null,\n \"e\": 346,\n \"s\": 214,\n \"text\": \"Input : test_str = ‘geeksforgeeks_best_for_geeks’ Output : geeksforgeeksBestForGeeks Explanation : String converted to Camel Case. \"\n },\n {\n \"code\": null,\n \"e\": 414,\n \"s\": 346,\n \"text\": \"Method #1 : Using split() + join() + title() + generator expression\"\n },\n {\n \"code\": null,\n \"e\": 644,\n \"s\": 414,\n \"text\": \"The combination of above functions can be used to solve this problem. 
How to set the dropdown button in the center?

26 Aug, 2020
A dropdown menu is a menu that offers a list of options to choose from. The title of the menu is always on display, while the rest of the items are hidden. It is a toggleable menu in which all the items can be shown by clicking on it.

The dropdown button can be positioned in the center of the page by setting the "text-align" property of the dropdown div to center. The following example contains a simple Bootstrap dropdown menu with an added class "my-menu"; the property "text-align: center" is set on that class.

Example: Here, the property "text-align: center" aligns the content of the dropdown div to the center, which places the dropdown button in the center. (The original listing survives only partially in this copy, so the code below is a minimal reconstruction built from the description above; the Bootstrap/jQuery CDN URLs are the commonly used ones, not necessarily the exact ones from the article.)
\nOutput:\nBootstrap-Misc\nBootstrap\nWeb Technologies\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":28,"s":0,"text":"\n26 Aug, 2020"},{"code":null,"e":258,"s":28,"text":"Dropdown menu is a menu that offers a list of options to choose from. The title of the menu is always in display and the rest of the items are hidden. It is a toggleable menu in which all the items can be shown by clicking on it."},{"code":null,"e":534,"s":258,"text":"Dropdown button can be positioned in the center of the page by setting the “text-align” property of dropdown div to center. The following example contains a simple Bootstrap dropdown menu with an added class “my-menu”. The property “text-align: center” is added to the class."},{"code":null,"e":675,"s":534,"text":"Example: Here, the property “text-align: center” aligns the content of dropdown div to center, which sets the dropdown button to the center."},{"code":"
","e":2697,"s":675,"text":null},{"code":null,"e":2705,"s":2697,"text":"Output:"},{"code":null,"e":2720,"s":2705,"text":"Bootstrap-Misc"},{"code":null,"e":2730,"s":2720,"text":"Bootstrap"},{"code":null,"e":2747,"s":2730,"text":"Web Technologies"}],"string":"[\n {\n \"code\": null,\n \"e\": 28,\n \"s\": 0,\n \"text\": \"\\n26 Aug, 2020\"\n },\n {\n \"code\": null,\n \"e\": 258,\n \"s\": 28,\n \"text\": \"Dropdown menu is a menu that offers a list of options to choose from. The title of the menu is always in display and the rest of the items are hidden. It is a toggleable menu in which all the items can be shown by clicking on it.\"\n },\n {\n \"code\": null,\n \"e\": 534,\n \"s\": 258,\n \"text\": \"Dropdown button can be positioned in the center of the page by setting the “text-align” property of dropdown div to center. The following example contains a simple Bootstrap dropdown menu with an added class “my-menu”. The property “text-align: center” is added to the class.\"\n },\n {\n \"code\": null,\n \"e\": 675,\n \"s\": 534,\n \"text\": \"Example: Here, the property “text-align: center” aligns the content of dropdown div to center, which sets the dropdown button to the center.\"\n },\n {\n \"code\": \"
\",\n \"e\": 2697,\n \"s\": 675,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 2705,\n \"s\": 2697,\n \"text\": \"Output:\"\n },\n {\n \"code\": null,\n \"e\": 2720,\n \"s\": 2705,\n \"text\": \"Bootstrap-Misc\"\n },\n {\n \"code\": null,\n \"e\": 2730,\n \"s\": 2720,\n \"text\": \"Bootstrap\"\n },\n {\n \"code\": null,\n \"e\": 2747,\n \"s\": 2730,\n \"text\": \"Web Technologies\"\n }\n]"}}},{"rowIdx":28,"cells":{"title":{"kind":"string","value":"Main thread in Java"},"text":{"kind":"string","value":"21 Sep, 2021\nJava provides built-in support for multithreaded programming. A multi-threaded program contains two or more parts that can run concurrently. Each part of such a program is called a thread, and each thread defines a separate path of execution.When a Java program starts up, one thread begins running immediately. This is usually called the main thread of our program because it is the one that is executed when our program begins. \nThere are certain properties associated with the main thread which are as follows:\nIt is the thread from which other “child” threads will be spawned.\nOften, it must be the last thread to finish execution because it performs various shutdown actions\nThe flow diagram is as follows:\nHow to control Main thread\nThe main thread is created automatically when our program is started. To control it we must obtain a reference to it. This can be done by calling the method currentThread( ) which is present in Thread class. This method returns a reference to the thread on which it is called. The default priority of Main thread is 5 and for all remaining user threads priority will be inherited from parent to child.\nExample\nJava\n// Java program to control the Main Thread // Importing required classesimport java.io.*;import java.util.*; // Class 1// Main class extending thread classpublic class Test extends Thread { // Main driver method public static void main(String[] args) { // Getting reference to Main thread Thread t = Thread.currentThread(); // Getting name of Main thread System.out.println(\"Current thread: \" + t.getName()); // Changing the name of Main thread t.setName(\"Geeks\"); System.out.println(\"After name change: \" + t.getName()); // Getting priority of Main thread System.out.println(\"Main thread priority: \" + t.getPriority()); // Setting priority of Main thread to MAX(10) t.setPriority(MAX_PRIORITY); // Print and display the main thread priority System.out.println(\"Main thread new priority: \" + t.getPriority()); for (int i = 0; i < 5; i++) { System.out.println(\"Main thread\"); } // Main thread creating a child thread Thread ct = new Thread() { // run() method of a thread public void run() { for (int i = 0; i < 5; i++) { System.out.println(\"Child thread\"); } } }; // Getting priority of child thread // which will be inherited from Main thread // as it is created by Main thread System.out.println(\"Child thread priority: \" + ct.getPriority()); // Setting priority of Main thread to MIN(1) ct.setPriority(MIN_PRIORITY); System.out.println(\"Child thread new priority: \" + ct.getPriority()); // Starting child thread ct.start(); }} // Class 2// Helper class extending Thread class// Child Thread classclass ChildThread extends Thread { @Override public void run() { for (int i = 0; i < 5; i++) { // Print statement whenever child thread is // called System.out.println(\"Child thread\"); } }}\nCurrent thread: main\nAfter name change: Geeks\nMain thread priority: 5\nMain thread new priority: 10\nMain thread\nMain thread\nMain thread\nMain 
thread\nMain thread\nChild thread priority: 10\nChild thread new priority: 1\nChild thread\nChild thread\nChild thread\nChild thread\nChild thread\nNow let us discuss the relationship between the main() method and the main thread in Java. For each program, a Main thread is created by JVM(Java Virtual Machine). The “Main” thread first verifies the existence of the main() method, and then it initializes the class. Note that from JDK 6, main() method is mandatory in a standalone java application.\nDeadlocking with use of Main Thread(only single thread)\nWe can create a deadlock by just using the Main thread, i.e. by just using a single thread.\nExample\nJava\n// Java program to demonstrate deadlock// using Main thread // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Try block to check for exceptions try { // Print statement System.out.println(\"Entering into Deadlock\"); // Joining the current thread Thread.currentThread().join(); // This statement will never execute System.out.println(\"This statement will never execute\"); } // Catch block to handle the exceptions catch (InterruptedException e) { // Display the exception along with line number // using printStackTrace() method e.printStackTrace(); } }}\nOutput: \nOutput explanation: The statement “Thread.currentThread().join()”, will tell Main thread to wait for this thread(i.e. wait for itself) to die. Thus Main thread wait for itself to die, which is nothing but a deadlock.\nRelated Article: Daemon Threads in Java.This article is contributed by Gaurav Miglani. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.\nthalabala628\nsagartomar9927\nsurindertarika1234\nJava-Multithreading\nJava\nJava\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":52,"s":24,"text":"\n21 Sep, 2021"},{"code":null,"e":483,"s":52,"text":"Java provides built-in support for multithreaded programming. A multi-threaded program contains two or more parts that can run concurrently. Each part of such a program is called a thread, and each thread defines a separate path of execution.When a Java program starts up, one thread begins running immediately. This is usually called the main thread of our program because it is the one that is executed when our program begins. "},{"code":null,"e":566,"s":483,"text":"There are certain properties associated with the main thread which are as follows:"},{"code":null,"e":633,"s":566,"text":"It is the thread from which other “child” threads will be spawned."},{"code":null,"e":732,"s":633,"text":"Often, it must be the last thread to finish execution because it performs various shutdown actions"},{"code":null,"e":764,"s":732,"text":"The flow diagram is as follows:"},{"code":null,"e":791,"s":764,"text":"How to control Main thread"},{"code":null,"e":1193,"s":791,"text":"The main thread is created automatically when our program is started. To control it we must obtain a reference to it. This can be done by calling the method currentThread( ) which is present in Thread class. This method returns a reference to the thread on which it is called. 
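A small follow-up sketch (an addition to the article, not from the original): the overload join(millis) bounds the wait, so the same self-join returns after the timeout instead of deadlocking forever. The class name below is hypothetical.

Java

// Hypothetical variation of the deadlock example: a timed join
// on the current thread waits at most one second, then returns.
public class TimedJoin {
    public static void main(String[] args)
        throws InterruptedException
    {
        System.out.println("Before timed self-join");

        // waits up to 1000 ms for the current thread to die;
        // since it cannot die while it is waiting, the call
        // simply times out instead of blocking forever
        Thread.currentThread().join(1000);

        System.out.println("Still alive after the timeout");
    }
}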
Removing the object from the top of the Stack in C#

28 Jan, 2019
The Stack<T>.Pop method removes and returns the object at the top of the Stack<T>. This method comes under the System.Collections.Generic namespace.

Syntax:

public T Pop ();

Return Value: It returns the object which is removed from the top of the Stack.

Exception: This method throws an InvalidOperationException if the Stack is empty.

Below programs illustrate the use of the above-discussed method (the generic type arguments were lost in this copy of the article and have been restored from the surrounding comments):

Example 1:

// C# Program to illustrate the
// use of Stack<T>.Pop() Method
using System;
using System.Collections.Generic;

class GFG {

    // Main Method
    public static void Main()
    {
        // Creating a Stack of Strings
        Stack<string> myStack = new Stack<string>();

        // Inserting the elements into the Stack
        myStack.Push("Geeks");
        myStack.Push("Geeks Classes");
        myStack.Push("Noida");
        myStack.Push("Data Structures");
        myStack.Push("GeeksforGeeks");

        Console.WriteLine("Number of elements in the Stack: {0}",
                          myStack.Count);

        // Retrieving top element of Stack
        Console.Write("Top element of Stack is: ");
        Console.Write(myStack.Pop());

        // printing the number of Stack elements
        // after the Pop operation
        Console.WriteLine("\nNumber of elements in the Stack: {0}",
                          myStack.Count);
    }
}

Output:

Number of elements in the Stack: 5
Top element of Stack is: GeeksforGeeks
Number of elements in the Stack: 4

Example 2:

// C# Program to illustrate the
// use of Stack<T>.Pop() Method
using System;
using System.Collections.Generic;

class GFG {

    // Main Method
    public static void Main()
    {
        // Creating a Stack of integers
        Stack<int> myStack = new Stack<int>();

        // Inserting the elements into the Stack
        myStack.Push(7);
        myStack.Push(9);

        Console.WriteLine("Number of elements in the Stack: {0}",
                          myStack.Count);

        // Retrieving top element of Stack
        Console.Write("Top element of Stack is: ");
        Console.Write(myStack.Pop());

        // printing the number of Stack elements
        // after the Pop operation
        Console.WriteLine("\nNumber of elements in the Stack: {0}",
                          myStack.Count);
    }
}

Output:

Number of elements in the Stack: 2
Top element of Stack is: 9
Number of elements in the Stack: 1

Reference:

https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.stack-1.pop?view=netframework-4.7.2
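Since Pop() throws on an empty stack, newer runtimes offer a non-throwing variant. The sketch below is an addition to the article; TryPop is available from .NET Core 2.0 / .NET Standard 2.1 onward, not in the .NET Framework 4.7.2 that the reference above targets.

// Assumed supplement: Stack<T>.TryPop returns false instead of
// throwing InvalidOperationException when the stack is empty.
using System;
using System.Collections.Generic;

class TryPopDemo {

    public static void Main()
    {
        Stack<int> myStack = new Stack<int>();
        myStack.Push(7);

        // succeeds: the stack has one element
        if (myStack.TryPop(out int top))
            Console.WriteLine("Popped: " + top);

        // fails gracefully: the stack is now empty
        if (!myStack.TryPop(out _))
            Console.WriteLine("Stack was empty, nothing popped");
    }
}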
This method comes under the System.Collections.Generic namespace."},{"code":null,"e":191,"s":183,"text":"Syntax:"},{"code":null,"e":208,"s":191,"text":"public T Pop ();"},{"code":null,"e":294,"s":208,"text":"Return Value: It returns the Object which is to be removed from the top of the Stack."},{"code":null,"e":380,"s":294,"text":"Exception : This method will give InvalidOperationException if the Stack is empty."},{"code":null,"e":445,"s":380,"text":"Below programs illustrate the use of the above-discussed method:"},{"code":null,"e":456,"s":445,"text":"Example 1:"},{"code":"// C# Program to illustrate the// use of Stack.Pop() Methodusing System;using System.Collections.Generic; class GFG { // Main Method public static void Main() { // Creating a Stack of Strings Stack myStack = new Stack(); // Inserting the elements into the Stack myStack.Push(\"Geeks\"); myStack.Push(\"Geeks Classes\"); myStack.Push(\"Noida\"); myStack.Push(\"Data Structures\"); myStack.Push(\"GeeksforGeeks\"); Console.WriteLine(\"Number of elements in the Stack: {0}\", myStack.Count); // Retrieveing top element of Stack Console.Write(\"Top element of Stack is: \"); Console.Write(myStack.Pop()); // printing the no of Stack element // after Pop operation Console.WriteLine(\"\\nNumber of elements in the Stack: {0}\", myStack.Count); }}","e":1429,"s":456,"text":null},{"code":null,"e":1539,"s":1429,"text":"Number of elements in the Stack: 5\nTop element of Stack is: GeeksforGeeks\nNumber of elements in the Stack: 4\n"},{"code":null,"e":1550,"s":1539,"text":"Example 2:"},{"code":"// C# Program to illustrate the// use of Stack.Pop() Methodusing System;using System.Collections.Generic; class GFG { // Main Method public static void Main() { // Creating a Stack of integers Stack myStack = new Stack(); // Inserting the elements into the Stack myStack.Push(7); myStack.Push(9); Console.WriteLine(\"Number of elements in the Stack: {0}\", myStack.Count); // Retrieveing top element of Stack Console.Write(\"Top element of Stack is: \"); Console.Write(myStack.Pop()); // printing the no of Stack element // after Pop operation Console.WriteLine(\"\\nNumber of elements in the Stack: {0}\", myStack.Count); }}","e":2391,"s":1550,"text":null},{"code":null,"e":2489,"s":2391,"text":"Number of elements in the Stack: 2\nTop element of Stack is: 9\nNumber of elements in the Stack: 1\n"},{"code":null,"e":2500,"s":2489,"text":"Reference:"},{"code":null,"e":2607,"s":2500,"text":"https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.stack-1.pop?view=netframework-4.7.2"},{"code":null,"e":2632,"s":2607,"text":"CSharp-Generic-Namespace"},{"code":null,"e":2653,"s":2632,"text":"CSharp-Generic-Stack"},{"code":null,"e":2667,"s":2653,"text":"CSharp-method"},{"code":null,"e":2670,"s":2667,"text":"C#"}],"string":"[\n {\n \"code\": null,\n \"e\": 28,\n \"s\": 0,\n \"text\": \"\\n28 Jan, 2019\"\n },\n {\n \"code\": null,\n \"e\": 183,\n \"s\": 28,\n \"text\": \"Stack.Pop Method is used to remove and returns the object at the top of the Stack. 
Python | Set 3 (Strings, Lists, Tuples, Iterations)

13 Jul, 2022
In the previous article, we read about the basics of Python. Now, we continue with some more Python concepts.

A string is a sequence of characters that can be a combination of letters, numbers, and special characters. It can be declared in Python by using single quotes, double quotes, or even triple quotes. These quotes are not a part of the string; they define only the start and the end of the string. Strings are immutable, i.e., they cannot be changed. Each element of the string can be accessed using indexing or slicing operations.

Python

# Assigning string to a variable
a = 'This is a string'
print(a)
b = "This is a string"
print(b)
c = '''This is a string'''
print(c)

Output:

This is a string
This is a string
This is a string
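The indexing and slicing operations mentioned above are easy to see in a short sketch. This example is our addition rather than part of the original article, and the variable names in it are purely illustrative:

# Our illustrative sketch: indexing and slicing a string
s = 'This is a string'
print(s[0])     # first character: 'T'
print(s[-1])    # last character: 'g'
print(s[0:4])   # slice of the first four characters: 'This'

# Strings are immutable, so an item assignment such as
# s[0] = 't' would raise a TypeError.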
Now, we continue with some more python concepts."},{"code":null,"e":589,"s":162,"text":"A string is a sequence of characters that can be a combination of letters, numbers, and special characters. It can be declared in python by using single quotes, double quotes, or even triple quotes. These quotes are not a part of a string, they define only starting and ending of the string. Strings are immutable, i.e., they cannot be changed. Each element of the string can be accessed using indexing or slicing operations."},{"code":null,"e":596,"s":589,"text":"Python"},{"code":"# Assigning string to a variablea = 'This is a string'print (a)b = \"This is a string\"print (b)c= '''This is a string'''print (c)","e":725,"s":596,"text":null},{"code":null,"e":733,"s":725,"text":"Output:"},{"code":null,"e":784,"s":733,"text":"This is a string\nThis is a string\nThis is a string"},{"code":null,"e":1336,"s":784,"text":"Lists are one of the most powerful data structures in python. Lists are sequenced data types. In Python, an empty list is created using list() function. They are just like the arrays declared in other languages. But the most powerful thing is that list need not be always homogeneous. A single list can contain strings, integers, as well as other objects. Lists can also be used for implementing stacks and queues. Lists are mutable, i.e., they can be altered once declared. The elements of list can be accessed using indexing and slicing operations."},{"code":null,"e":1343,"s":1336,"text":"Python"},{"code":"# Declaring a listL = [1, \"a\" , \"string\" , 1+2]print L#Adding an element in the listL.append(6) print L#Deleting last element from a listL.pop()print L#Displaying Second element of the listprint L[1]","e":1546,"s":1343,"text":null},{"code":null,"e":1563,"s":1546,"text":"The output is: "},{"code":null,"e":1572,"s":1563,"text":"Chapters"},{"code":null,"e":1599,"s":1572,"text":"descriptions off, selected"},{"code":null,"e":1649,"s":1599,"text":"captions settings, opens captions settings dialog"},{"code":null,"e":1672,"s":1649,"text":"captions off, selected"},{"code":null,"e":1680,"s":1672,"text":"English"},{"code":null,"e":1704,"s":1680,"text":"This is a modal window."},{"code":null,"e":1773,"s":1704,"text":"Beginning of dialog window. Escape will cancel and close the window."},{"code":null,"e":1795,"s":1773,"text":"End of dialog window."},{"code":null,"e":1866,"s":1795,"text":"[1, 'a', 'string', 3]\n[1, 'a', 'string', 3, 6]\n[1, 'a', 'string', 3]\na"},{"code":null,"e":2064,"s":1866,"text":"Tuples in Python: A tuple is a sequence of immutable Python objects. Tuples are just like lists with the exception that tuples cannot be changed once declared. Tuples are usually faster than lists."},{"code":null,"e":2071,"s":2064,"text":"Python"},{"code":"tup = (1, \"a\", \"string\", 1+2)print(tup)print(tup[1])","e":2124,"s":2071,"text":null},{"code":null,"e":2141,"s":2124,"text":"The output is : "},{"code":null,"e":2165,"s":2141,"text":"(1, 'a', 'string', 3)\na"},{"code":null,"e":2365,"s":2165,"text":"Iterations in Python: Iterations or looping can be performed in python by ‘for’ and ‘while’ loops. 
Tuples in Python: A tuple is a sequence of immutable Python objects. Tuples are just like lists, with the exception that tuples cannot be changed once declared. Tuples are usually faster than lists.

Python

tup = (1, "a", "string", 1 + 2)
print(tup)
print(tup[1])

The output is:

(1, 'a', 'string', 3)
a
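To make the immutability claim concrete, the following small sketch (our addition, not from the original article) shows what happens when code tries to modify a tuple:

# Our illustrative sketch: tuples cannot be changed once declared
tup = (1, "a", "string", 3)
try:
    tup[0] = 99
except TypeError as err:
    print("cannot modify a tuple:", err)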
Iterations in Python: Iterations or looping can be performed in Python by 'for' and 'while' loops. Apart from iterating upon a particular condition, we can also iterate on strings, lists, and tuples.

Example 1: Iteration by while loop for a condition

Python

i = 1
while i < 10:
    print(i)
    i += 1

The output is:

1
2
3
4
5
6
7
8
9

Example 2: Iteration by for loop on the string

Python

s = "Hello World"
for i in s:
    print(i)

The output is:

H
e
l
l
o

W
o
r
l
d

Example 3: Iteration by for loop on the list

Python

L = [1, 4, 5, 7, 8, 9]
for i in L:
    print(i)

The output is:

1
4
5
7
8
9

Example 4: Iteration by for loop over a range

Python

for i in range(0, 10):
    print(i)

The output is:

0
1
2
3
4
5
6
7
8
9
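When both the index and the value are needed during iteration, enumerate() is the idiomatic tool. This sketch is our addition and is not part of the original article:

# Our illustrative sketch: iterating with index and value
L = [1, 4, 5, 7, 8, 9]
for index, value in enumerate(L):
    print(index, value)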
Next Article – Python: Dictionary and Keywords
Quiz on Data Types in Python

Counting frequencies of array elements

21 Jun, 2022

Given an array which may contain duplicates, print all elements and their frequencies.

Examples:

Input : arr[] = {10, 20, 20, 10, 10, 20, 5, 20}
Output : 10 3
         20 4
         5 1

Input : arr[] = {10, 20, 20}
Output : 10 1
         20 2

A simple solution is to run two loops. For every item, count the number of times it occurs.
To avoid duplicate printing, keep track of processed items.

C++

// CPP program to count frequencies of array items
#include <bits/stdc++.h>
using namespace std;

void countFreq(int arr[], int n)
{
    // Mark all array elements as not visited
    vector<bool> visited(n, false);

    // Traverse through array elements and
    // count frequencies
    for (int i = 0; i < n; i++) {

        // Skip this element if already processed
        if (visited[i] == true)
            continue;

        // Count frequency
        int count = 1;
        for (int j = i + 1; j < n; j++) {
            if (arr[i] == arr[j]) {
                visited[j] = true;
                count++;
            }
        }
        cout << arr[i] << " " << count << endl;
    }
}

int main()
{
    int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
    int n = sizeof(arr) / sizeof(arr[0]);
    countFreq(arr, n);
    return 0;
}

Java

// Java program to count frequencies of array items
import java.util.Arrays;

class GFG {
    public static void countFreq(int arr[], int n)
    {
        boolean visited[] = new boolean[n];
        Arrays.fill(visited, false);

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {

            // Skip this element if already processed
            if (visited[i] == true)
                continue;

            // Count frequency
            int count = 1;
            for (int j = i + 1; j < n; j++) {
                if (arr[i] == arr[j]) {
                    visited[j] = true;
                    count++;
                }
            }
            System.out.println(arr[i] + " " + count);
        }
    }

    // Driver code
    public static void main(String[] args)
    {
        int arr[] = new int[] { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.length;
        countFreq(arr, n);
    }
}

// This code contributed by Adarsh_Verma.

Python3

# Python 3 program to count frequencies
# of array items
def countFreq(arr, n):

    # Mark all array elements as not visited
    visited = [False for i in range(n)]

    # Traverse through array elements
    # and count frequencies
    for i in range(n):

        # Skip this element if already
        # processed
        if (visited[i] == True):
            continue

        # Count frequency
        count = 1
        for j in range(i + 1, n, 1):
            if (arr[i] == arr[j]):
                visited[j] = True
                count += 1
        print(arr[i], count)

# Driver Code
if __name__ == '__main__':
    arr = [10, 20, 20, 10, 10, 20, 5, 20]
    n = len(arr)
    countFreq(arr, n)

# This code is contributed by
# Shashank_Sharma

C#

// C# program to count frequencies of array items
using System;

class GFG {
    public static void countFreq(int[] arr, int n)
    {
        bool[] visited = new bool[n];

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {

            // Skip this element if already processed
            if (visited[i] == true)
                continue;

            // Count frequency
            int count = 1;
            for (int j = i + 1; j < n; j++) {
                if (arr[i] == arr[j]) {
                    visited[j] = true;
                    count++;
                }
            }
            Console.WriteLine(arr[i] + " " + count);
        }
    }

    // Driver code
    public static void Main(String[] args)
    {
        int[] arr = new int[] { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.Length;
        countFreq(arr, n);
    }
}

// This code has been contributed by 29AjayKumar

Output:

10 3
20 4
5 1

Time Complexity: O(n²)
Auxiliary Space: O(n)
An efficient solution is to use hashing.

C++

// CPP program to count frequencies of array items
#include <bits/stdc++.h>
using namespace std;

void countFreq(int arr[], int n)
{
    unordered_map<int, int> mp;

    // Traverse through array elements and
    // count frequencies
    for (int i = 0; i < n; i++)
        mp[arr[i]]++;

    // Traverse through map and print frequencies
    for (auto x : mp)
        cout << x.first << " " << x.second << endl;
}

int main()
{
    int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
    int n = sizeof(arr) / sizeof(arr[0]);
    countFreq(arr, n);
    return 0;
}

Java

// Java program to count frequencies of array items
import java.util.*;

class GFG {
    static void countFreq(int arr[], int n)
    {
        Map<Integer, Integer> mp = new HashMap<>();

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {
            if (mp.containsKey(arr[i])) {
                mp.put(arr[i], mp.get(arr[i]) + 1);
            }
            else {
                mp.put(arr[i], 1);
            }
        }

        // Traverse through map and print frequencies
        for (Map.Entry<Integer, Integer> entry : mp.entrySet()) {
            System.out.println(entry.getKey() + " " + entry.getValue());
        }
    }

    // Driver code
    public static void main(String args[])
    {
        int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.length;
        countFreq(arr, n);
    }
}

// This code contributed by Rajput-Ji

Python3

# Python3 program to count frequencies
# of array items
def countFreq(arr, n):

    mp = dict()

    # Traverse through array elements
    # and count frequencies
    for i in range(n):
        if arr[i] in mp.keys():
            mp[arr[i]] += 1
        else:
            mp[arr[i]] = 1

    # Traverse through map and print
    # frequencies
    for x in mp:
        print(x, " ", mp[x])

# Driver code
arr = [10, 20, 20, 10, 10, 20, 5, 20]
n = len(arr)
countFreq(arr, n)

# This code is contributed by
# Mohit kumar 29

C#

// C# implementation of the approach
using System;
using System.Collections.Generic;

class GFG {
    static void countFreq(int[] arr, int n)
    {
        Dictionary<int, int> mp = new Dictionary<int, int>();

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {
            if (mp.ContainsKey(arr[i])) {
                var val = mp[arr[i]];
                mp.Remove(arr[i]);
                mp.Add(arr[i], val + 1);
            }
            else {
                mp.Add(arr[i], 1);
            }
        }

        // Traverse through map and print frequencies
        foreach (KeyValuePair<int, int> entry in mp) {
            Console.WriteLine(entry.Key + " " + entry.Value);
        }
    }

    // Driver code
    public static void Main(String[] args)
    {
        int[] arr = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.Length;
        countFreq(arr, n);
    }
}

/* This code contributed by PrinciRaj1992 */

Output:

5 1
10 3
20 4

Time Complexity: O(n)
Auxiliary Space: O(n)
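As an aside (our addition, not part of the original article), Python's standard library already ships a hash-based frequency counter, so the hashing approach above can be expressed in a few lines:

# Our illustrative sketch using collections.Counter,
# which is itself a hash-based map of element -> count
from collections import Counter

arr = [10, 20, 20, 10, 10, 20, 5, 20]
for element, count in Counter(arr).items():
    print(element, count)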
In the above efficient solution, how do we print elements in the same order as they appear in the input?

C++

// CPP program to count frequencies of array items
#include <bits/stdc++.h>
using namespace std;

void countFreq(int arr[], int n)
{
    unordered_map<int, int> mp;

    // Traverse through array elements and
    // count frequencies
    for (int i = 0; i < n; i++)
        mp[arr[i]]++;

    // To print elements according to first
    // occurrence, traverse array one more time
    // print frequencies of elements and mark
    // frequencies as -1 so that same element
    // is not printed multiple times.
    for (int i = 0; i < n; i++) {
        if (mp[arr[i]] != -1) {
            cout << arr[i] << " " << mp[arr[i]] << endl;
            mp[arr[i]] = -1;
        }
    }
}

int main()
{
    int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
    int n = sizeof(arr) / sizeof(arr[0]);
    countFreq(arr, n);
    return 0;
}

Java

// Java program to count frequencies of array items
import java.util.*;

class GFG {
    static void countFreq(int arr[], int n)
    {
        Map<Integer, Integer> mp = new HashMap<>();

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {
            mp.put(arr[i], mp.get(arr[i]) == null ? 1 : mp.get(arr[i]) + 1);
        }

        // To print elements according to first
        // occurrence, traverse array one more time
        // print frequencies of elements and mark
        // frequencies as -1 so that same element
        // is not printed multiple times.
        for (int i = 0; i < n; i++) {
            if (mp.get(arr[i]) != -1) {
                System.out.println(arr[i] + " " + mp.get(arr[i]));
                mp.put(arr[i], -1);
            }
        }
    }

    // Driver code
    public static void main(String[] args)
    {
        int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.length;
        countFreq(arr, n);
    }
}

// This code contributed by Rajput-Ji

Python3

# Python3 program to count frequencies of array items
def countFreq(arr, n):

    mp = {}

    # Traverse through array elements and
    # count frequencies
    for i in range(n):
        if arr[i] not in mp:
            mp[arr[i]] = 0
        mp[arr[i]] += 1

    # To print elements according to first
    # occurrence, traverse array one more time
    # print frequencies of elements and mark
    # frequencies as -1 so that same element
    # is not printed multiple times.
    for i in range(n):
        if (mp[arr[i]] != -1):
            print(arr[i], mp[arr[i]])
            mp[arr[i]] = -1

# Driver code
arr = [10, 20, 20, 10, 10, 20, 5, 20]
n = len(arr)
countFreq(arr, n)

# This code is contributed by shubhamsingh10

C#

// C# program to count frequencies of array items
using System;
using System.Collections.Generic;

class GFG {
    static void countFreq(int[] arr, int n)
    {
        Dictionary<int, int> mp = new Dictionary<int, int>();

        // Traverse through array elements and
        // count frequencies
        for (int i = 0; i < n; i++) {
            if (mp.ContainsKey(arr[i])) {
                var val = mp[arr[i]];
                mp.Remove(arr[i]);
                mp.Add(arr[i], val + 1);
            }
            else {
                mp.Add(arr[i], 1);
            }
        }

        // To print elements according to first
        // occurrence, traverse array one more time
        // print frequencies of elements and mark
        // frequencies as -1 so that same element
        // is not printed multiple times.
        for (int i = 0; i < n; i++) {
            if (mp.ContainsKey(arr[i]) && mp[arr[i]] != -1) {
                Console.WriteLine(arr[i] + " " + mp[arr[i]]);
                mp.Remove(arr[i]);
                mp.Add(arr[i], -1);
            }
        }
    }

    // Driver code
    public static void Main(String[] args)
    {
        int[] arr = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int n = arr.Length;
        countFreq(arr, n);
    }
}

// This code is contributed by Princi Singh

Output:

10 3
20 4
5 1

Time Complexity: O(n)
Auxiliary Space: O(n)
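A related observation (our addition, not part of the original article): in Python 3.7 and later, plain dictionaries preserve insertion order, so a counting dict already reports elements in the order of their first occurrence, without the -1 marking trick used above:

# Our illustrative sketch: dict insertion order gives
# first-occurrence order for free in Python 3.7+
arr = [10, 20, 20, 10, 10, 20, 5, 20]
freq = {}
for x in arr:
    freq[x] = freq.get(x, 0) + 1   # count each element
for element, count in freq.items():
    print(element, count)          # prints 10 3, 20 4, 5 1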
This problem can also be solved in Java using a HashMap. Below is the program.

C++

// C++ program to count frequencies of
// integers in array using Hashmap
#include <bits/stdc++.h>
using namespace std;

void frequencyNumber(int arr[], int size)
{
    // Creating a HashMap containing integer
    // as a key and occurrences as a value
    unordered_map<int, int> freqMap;
    for (int i = 0; i < size; i++)
        freqMap[arr[i]]++;

    // Printing the freqMap
    for (auto it : freqMap)
        cout << it.first << " " << it.second << endl;
}

int main()
{
    int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
    int size = sizeof(arr) / sizeof(arr[0]);
    frequencyNumber(arr, size);
    return 0;
}

Java

// Java program to count frequencies of
// integers in array using Hashmap
import java.util.*;

class GFG {
    static void frequencyNumber(int arr[], int size)
    {
        // Creating a HashMap containing integer
        // as a key and occurrences as a value
        HashMap<Integer, Integer> freqMap = new HashMap<>();
        for (int i = 0; i < size; i++) {
            if (freqMap.containsKey(arr[i]))
                freqMap.put(arr[i], freqMap.get(arr[i]) + 1);
            else
                freqMap.put(arr[i], 1);
        }

        // Printing the freqMap
        for (Map.Entry<Integer, Integer> entry : freqMap.entrySet()) {
            System.out.println(entry.getKey() + " " + entry.getValue());
        }
    }

    public static void main(String[] args)
    {
        int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int size = arr.length;
        frequencyNumber(arr, size);
    }
}

Python3

# Python3 program to count frequencies of
# integers in array using a dictionary
def frequencyNumber(arr, size):

    # Creating a dictionary containing integer
    # as a key and occurrences as a value
    freqMap = {}
    for i in range(size):
        if arr[i] in freqMap:
            freqMap[arr[i]] += 1
        else:
            freqMap[arr[i]] = 1

    # Printing the freqMap
    for key, value in freqMap.items():
        print(key, value)

arr = [10, 20, 20, 10, 10, 20, 5, 20]
size = len(arr)
frequencyNumber(arr, size)

C#

// C# program to count frequencies of
// integers in array using Hashmap
using System;
using System.Collections.Generic;

class GFG {
    static void frequencyNumber(int[] arr, int size)
    {
        // Creating a Dictionary containing integer
        // as a key and occurrences as a value
        Dictionary<int, int> freqMap = new Dictionary<int, int>();
        for (int i = 0; i < size; i++) {
            if (freqMap.ContainsKey(arr[i])) {
                var val = freqMap[arr[i]];
                freqMap.Remove(arr[i]);
                freqMap.Add(arr[i], val + 1);
            }
            else {
                freqMap.Add(arr[i], 1);
            }
        }

        // Printing the freqMap
        foreach (KeyValuePair<int, int> entry in freqMap) {
            Console.WriteLine(entry.Key + " " + entry.Value);
        }
    }

    public static void Main(String[] args)
    {
        int[] arr = { 10, 20, 20, 10, 10, 20, 5, 20 };
        int size = arr.Length;
        frequencyNumber(arr, size);
    }
}

// This code is contributed by Taranpreet

Output:

5 1
10 3
20 4

Time Complexity: O(n), since a single loop is used to track the frequencies
Auxiliary Space: O(n) for the hashmap
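The get-or-default counting idiom used in the hashmap version can also be written with collections.defaultdict. This sketch is our addition rather than part of the article:

# Our illustrative sketch: defaultdict(int) supplies a default
# of 0 for missing keys, so counting needs no existence check
from collections import defaultdict

arr = [10, 20, 20, 10, 10, 20, 5, 20]
freq_map = defaultdict(int)
for x in arr:
    freq_map[x] += 1
for element, count in freq_map.items():
    print(element, count)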
"},{"code":null,"e":453,"s":449,"text":"C++"},{"code":null,"e":458,"s":453,"text":"Java"},{"code":null,"e":466,"s":458,"text":"Python3"},{"code":null,"e":469,"s":466,"text":"C#"},{"code":null,"e":480,"s":469,"text":"Javascript"},{"code":"// CPP program to count frequencies of array items#include using namespace std; void countFreq(int arr[], int n){ // Mark all array elements as not visited vector visited(n, false); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { // Skip this element if already processed if (visited[i] == true) continue; // Count frequency int count = 1; for (int j = i + 1; j < n; j++) { if (arr[i] == arr[j]) { visited[j] = true; count++; } } cout << arr[i] << \" \" << count << endl; }} int main(){ int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 }; int n = sizeof(arr) / sizeof(arr[0]); countFreq(arr, n); return 0;}","e":1293,"s":480,"text":null},{"code":"// Java program to count frequencies of array itemsimport java.util.Arrays; class GFG{public static void countFreq(int arr[], int n){ boolean visited[] = new boolean[n]; Arrays.fill(visited, false); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { // Skip this element if already processed if (visited[i] == true) continue; // Count frequency int count = 1; for (int j = i + 1; j < n; j++) { if (arr[i] == arr[j]) { visited[j] = true; count++; } } System.out.println(arr[i] + \" \" + count); }} // Driver codepublic static void main(String []args){ int arr[] = new int[]{ 10, 20, 20, 10, 10, 20, 5, 20 }; int n = arr.length; countFreq(arr, n);}} // This code contributed by Adarsh_Verma.","e":2174,"s":1293,"text":null},{"code":"# Python 3 program to count frequencies# of array itemsdef countFreq(arr, n): # Mark all array elements as not visited visited = [False for i in range(n)] # Traverse through array elements # and count frequencies for i in range(n): # Skip this element if already # processed if (visited[i] == True): continue # Count frequency count = 1 for j in range(i + 1, n, 1): if (arr[i] == arr[j]): visited[j] = True count += 1 print(arr[i], count) # Driver Codeif __name__ == '__main__': arr = [10, 20, 20, 10, 10, 20, 5, 20] n = len(arr) countFreq(arr, n) # This code is contributed by# Shashank_Sharma","e":2932,"s":2174,"text":null},{"code":"// C# program to count frequencies of array itemsusing System; class GFG{ public static void countFreq(int []arr, int n) { bool []visited = new bool[n]; // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { // Skip this element if already processed if (visited[i] == true) continue; // Count frequency int count = 1; for (int j = i + 1; j < n; j++) { if (arr[i] == arr[j]) { visited[j] = true; count++; } } Console.WriteLine(arr[i] + \" \" + count); } } // Driver code public static void Main(String []args) { int []arr = new int[]{ 10, 20, 20, 10, 10, 20, 5, 20 }; int n = arr.Length; countFreq(arr, n); }} // This code has been contributed by 29AjayKumar","e":3921,"s":2932,"text":null},{"code":"","e":4689,"s":3921,"text":null},{"code":null,"e":4703,"s":4689,"text":"10 3\n20 4\n5 1"},{"code":null,"e":4750,"s":4703,"text":"Time Complexity : O(n2) Auxiliary Space : O(n)"},{"code":null,"e":4759,"s":4750,"text":"Chapters"},{"code":null,"e":4786,"s":4759,"text":"descriptions off, selected"},{"code":null,"e":4836,"s":4786,"text":"captions settings, opens captions settings dialog"},{"code":null,"e":4859,"s":4836,"text":"captions off, 
selected"},{"code":null,"e":4867,"s":4859,"text":"English"},{"code":null,"e":4891,"s":4867,"text":"This is a modal window."},{"code":null,"e":4960,"s":4891,"text":"Beginning of dialog window. Escape will cancel and close the window."},{"code":null,"e":4982,"s":4960,"text":"End of dialog window."},{"code":null,"e":5023,"s":4982,"text":"An efficient solution is to use hashing."},{"code":null,"e":5027,"s":5023,"text":"C++"},{"code":null,"e":5032,"s":5027,"text":"Java"},{"code":null,"e":5040,"s":5032,"text":"Python3"},{"code":null,"e":5043,"s":5040,"text":"C#"},{"code":null,"e":5054,"s":5043,"text":"Javascript"},{"code":"// CPP program to count frequencies of array items#include using namespace std; void countFreq(int arr[], int n){ unordered_map mp; // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) mp[arr[i]]++; // Traverse through map and print frequencies for (auto x : mp) cout << x.first << \" \" << x.second << endl;} int main(){ int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 }; int n = sizeof(arr) / sizeof(arr[0]); countFreq(arr, n); return 0;}","e":5599,"s":5054,"text":null},{"code":"// Java program to count frequencies of array itemsimport java.util.*; class GFG{ static void countFreq(int arr[], int n) { Map mp = new HashMap<>(); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { if (mp.containsKey(arr[i])) { mp.put(arr[i], mp.get(arr[i]) + 1); } else { mp.put(arr[i], 1); } } // Traverse through map and print frequencies for (Map.Entry entry : mp.entrySet()) { System.out.println(entry.getKey() + \" \" + entry.getValue()); } } // Driver code public static void main(String args[]) { int arr[] = {10, 20, 20, 10, 10, 20, 5, 20}; int n = arr.length; countFreq(arr, n); }} // This code contributed by Rajput-Ji","e":6535,"s":5599,"text":null},{"code":"# Python3 program to count frequencies # of array itemsdef countFreq(arr, n): mp = dict() # Traverse through array elements # and count frequencies for i in range(n): if arr[i] in mp.keys(): mp[arr[i]] += 1 else: mp[arr[i]] = 1 # Traverse through map and print # frequencies for x in mp: print(x, \" \", mp[x]) # Driver codearr = [10, 20, 20, 10, 10, 20, 5, 20 ]n = len(arr)countFreq(arr, n) # This code is contributed by # Mohit kumar 29","e":7058,"s":6535,"text":null},{"code":"// C# implementation of the approachusing System;using System.Collections.Generic; class GFG{ static void countFreq(int []arr, int n) { Dictionary mp = new Dictionary(); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { if (mp.ContainsKey(arr[i])) { var val = mp[arr[i]]; mp.Remove(arr[i]); mp.Add(arr[i], val + 1); } else { mp.Add(arr[i], 1); } } // Traverse through map and print frequencies foreach(KeyValuePair entry in mp) { Console.WriteLine(entry.Key + \" \" + entry.Value); } } // Driver code public static void Main(String []args) { int []arr = {10, 20, 20, 10, 10, 20, 5, 20}; int n = arr.Length; countFreq(arr, n); }} /* This code contributed by PrinciRaj1992 */","e":8071,"s":7058,"text":null},{"code":"","e":8773,"s":8071,"text":null},{"code":null,"e":8787,"s":8773,"text":"5 1\n10 3\n20 4"},{"code":null,"e":8833,"s":8787,"text":"Time Complexity : O(n) Auxiliary Space : O(n)"},{"code":null,"e":8924,"s":8833,"text":"In above efficient solution, how to print elements in same order as they appear in input? 
"},{"code":null,"e":8928,"s":8924,"text":"C++"},{"code":null,"e":8933,"s":8928,"text":"Java"},{"code":null,"e":8941,"s":8933,"text":"Python3"},{"code":null,"e":8944,"s":8941,"text":"C#"},{"code":null,"e":8955,"s":8944,"text":"Javascript"},{"code":"// CPP program to count frequencies of array items#include using namespace std; void countFreq(int arr[], int n){ unordered_map mp; // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) mp[arr[i]]++; // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. for (int i = 0; i < n; i++) { if (mp[arr[i]] != -1) { cout << arr[i] << \" \" << mp[arr[i]] << endl; mp[arr[i]] = -1; } }} int main(){ int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 }; int n = sizeof(arr) / sizeof(arr[0]); countFreq(arr, n); return 0;}","e":9755,"s":8955,"text":null},{"code":"// Java program to count frequencies of array items import java.util.*; class GFG { static void countFreq(int arr[], int n) { Map mp = new HashMap<>(); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { mp.put(arr[i], mp.get(arr[i]) == null ? 1 : mp.get(arr[i]) + 1); } // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. for (int i = 0; i < n; i++) { if (mp.get(arr[i]) != -1) { System.out.println(arr[i] + \" \" + mp.get(arr[i])); mp.put(arr[i], -1); } } } // Driver code public static void main(String[] args) { int arr[] = {10, 20, 20, 10, 10, 20, 5, 20}; int n = arr.length; countFreq(arr, n); }} // This code contributed by Rajput-Ji","e":10835,"s":9755,"text":null},{"code":"# Python3 program to count frequencies of array itemsdef countFreq(arr, n): mp = {} # Traverse through array elements and # count frequencies for i in range(n): if arr[i] not in mp: mp[arr[i]] = 0 mp[arr[i]] += 1 # To print elements according to first # occurrence, traverse array one more time # print frequencies of elements and mark # frequencies as -1 so that same element # is not printed multiple times. for i in range(n): if (mp[arr[i]] != -1): print(arr[i],mp[arr[i]]) mp[arr[i]] = -1 # Driver code arr = [10, 20, 20, 10, 10, 20, 5, 20]n = len(arr)countFreq(arr, n) # This code is contributed by shubhamsingh10","e":11559,"s":10835,"text":null},{"code":"// C# program to count frequencies of array items using System;using System.Collections.Generic; class GFG { static void countFreq(int []arr, int n) { Dictionary mp = new Dictionary(); // Traverse through array elements and // count frequencies for (int i = 0 ; i < n; i++) { if(mp.ContainsKey(arr[i])) { var val = mp[arr[i]]; mp.Remove(arr[i]); mp.Add(arr[i], val + 1); } else { mp.Add(arr[i], 1); } } // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. 
\"code\": null,\n \"e\": 8924,\n \"s\": 8833,\n \"text\": \"In above efficient solution, how to print elements in same order as they appear in input? \"\n },\n {\n \"code\": null,\n \"e\": 8928,\n \"s\": 8924,\n \"text\": \"C++\"\n },\n {\n \"code\": null,\n \"e\": 8933,\n \"s\": 8928,\n \"text\": \"Java\"\n },\n {\n \"code\": null,\n \"e\": 8941,\n \"s\": 8933,\n \"text\": \"Python3\"\n },\n {\n \"code\": null,\n \"e\": 8944,\n \"s\": 8941,\n \"text\": \"C#\"\n },\n {\n \"code\": null,\n \"e\": 8955,\n \"s\": 8944,\n \"text\": \"Javascript\"\n },\n {\n \"code\": \"// CPP program to count frequencies of array items#include using namespace std; void countFreq(int arr[], int n){ unordered_map mp; // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) mp[arr[i]]++; // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. for (int i = 0; i < n; i++) { if (mp[arr[i]] != -1) { cout << arr[i] << \\\" \\\" << mp[arr[i]] << endl; mp[arr[i]] = -1; } }} int main(){ int arr[] = { 10, 20, 20, 10, 10, 20, 5, 20 }; int n = sizeof(arr) / sizeof(arr[0]); countFreq(arr, n); return 0;}\",\n \"e\": 9755,\n \"s\": 8955,\n \"text\": null\n },\n {\n \"code\": \"// Java program to count frequencies of array items import java.util.*; class GFG { static void countFreq(int arr[], int n) { Map mp = new HashMap<>(); // Traverse through array elements and // count frequencies for (int i = 0; i < n; i++) { mp.put(arr[i], mp.get(arr[i]) == null ? 1 : mp.get(arr[i]) + 1); } // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. for (int i = 0; i < n; i++) { if (mp.get(arr[i]) != -1) { System.out.println(arr[i] + \\\" \\\" + mp.get(arr[i])); mp.put(arr[i], -1); } } } // Driver code public static void main(String[] args) { int arr[] = {10, 20, 20, 10, 10, 20, 5, 20}; int n = arr.length; countFreq(arr, n); }} // This code contributed by Rajput-Ji\",\n \"e\": 10835,\n \"s\": 9755,\n \"text\": null\n },\n {\n \"code\": \"# Python3 program to count frequencies of array itemsdef countFreq(arr, n): mp = {} # Traverse through array elements and # count frequencies for i in range(n): if arr[i] not in mp: mp[arr[i]] = 0 mp[arr[i]] += 1 # To print elements according to first # occurrence, traverse array one more time # print frequencies of elements and mark # frequencies as -1 so that same element # is not printed multiple times. for i in range(n): if (mp[arr[i]] != -1): print(arr[i],mp[arr[i]]) mp[arr[i]] = -1 # Driver code arr = [10, 20, 20, 10, 10, 20, 5, 20]n = len(arr)countFreq(arr, n) # This code is contributed by shubhamsingh10\",\n \"e\": 11559,\n \"s\": 10835,\n \"text\": null\n },\n {\n \"code\": \"// C# program to count frequencies of array items using System;using System.Collections.Generic; class GFG { static void countFreq(int []arr, int n) { Dictionary mp = new Dictionary(); // Traverse through array elements and // count frequencies for (int i = 0 ; i < n; i++) { if(mp.ContainsKey(arr[i])) { var val = mp[arr[i]]; mp.Remove(arr[i]); mp.Add(arr[i], val + 1); } else { mp.Add(arr[i], 1); } } // To print elements according to first // occurrence, traverse array one more time // print frequencies of elements and mark // frequencies as -1 so that same element // is not printed multiple times. 
for (int i = 0; i < n; i++) { if (mp.ContainsKey(arr[i]) && mp[arr[i]] != -1) { Console.WriteLine(arr[i] + \\\" \\\" + mp[arr[i]]); mp.Remove(arr[i]); mp.Add(arr[i], -1); } } } // Driver code public static void Main(String[] args) { int []arr = {10, 20, 20, 10, 10, 20, 5, 20}; int n = arr.Length; countFreq(arr, n); } } // This code is contributed by Princi Singh\",\n \"e\": 12933,\n \"s\": 11559,\n \"text\": null\n },\n {\n \"code\": \"\",\n \"e\": 13807,\n \"s\": 12933,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 13821,\n \"s\": 13807,\n \"text\": \"10 3\\n20 4\\n5 1\"\n },\n {\n \"code\": null,\n \"e\": 13867,\n \"s\": 13821,\n \"text\": \"Time Complexity : O(n) Auxiliary Space : O(n)\"\n },\n {\n \"code\": null,\n \"e\": 13940,\n \"s\": 13867,\n \"text\": \"This problem can be solved in Java using Hashmap. Below is the program. \"\n },\n {\n \"code\": null,\n \"e\": 13944,\n \"s\": 13940,\n \"text\": \"C++\"\n },\n {\n \"code\": null,\n \"e\": 13949,\n \"s\": 13944,\n \"text\": \"Java\"\n },\n {\n \"code\": null,\n \"e\": 13957,\n \"s\": 13949,\n \"text\": \"Python3\"\n },\n {\n \"code\": null,\n \"e\": 13960,\n \"s\": 13957,\n \"text\": \"C#\"\n },\n {\n \"code\": null,\n \"e\": 13971,\n \"s\": 13960,\n \"text\": \"Javascript\"\n },\n {\n \"code\": \"// C++ program to count frequencies of// integers in array using Hashmap#include using namespace std; void frequencyNumber(int arr[],int size){ // Creating a HashMap containing integer // as a key and occurrences as a value unordered_mapfreqMap; for (int i=0;i freqMap = new HashMap(); for (int i=0;i freqMap = new Dictionary(); for(int i = 0; i < size; i++){ if (freqMap.ContainsKey(arr[i])) { var val = freqMap[arr[i]]; freqMap.Remove(arr[i]); freqMap.Add(arr[i], val + 1); } else { freqMap.Add(arr[i], 1); } } // Printing the freqMap foreach(KeyValuePair entry in freqMap) { Console.WriteLine(entry.Key + \\\" \\\" + entry.Value); } } public static void Main(String []args) { int []arr = {10, 20, 20, 10, 10, 20, 5, 20}; int size = arr.Length; frequencyNumber(arr,size); }}// This code is contributed by Taranpreet\",\n \"e\": 17570,\n \"s\": 16640,\n \"text\": null\n },\n {\n \"code\": \"\",\n \"e\": 18592,\n \"s\": 17570,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 18606,\n \"s\": 18592,\n \"text\": \"5 1\\n10 3\\n20 4\"\n },\n {\n \"code\": null,\n \"e\": 18706,\n \"s\": 18606,\n \"text\": \"Time Complexity: O(n) since using a single loop to track frequencyAuxiliary Space: O(n) for hashmap\"\n },\n {\n \"code\": null,\n \"e\": 18727,\n \"s\": 18706,\n \"text\": \"Ajit kumar panigrahy\"\n },\n {\n \"code\": null,\n \"e\": 18742,\n \"s\": 18727,\n \"text\": \"mohit kumar 29\"\n },\n {\n \"code\": null,\n \"e\": 18758,\n \"s\": 18742,\n \"text\": \"Shashank_Sharma\"\n },\n {\n \"code\": null,\n \"e\": 18771,\n \"s\": 18758,\n \"text\": \"Adarsh_Verma\"\n },\n {\n \"code\": null,\n \"e\": 18783,\n \"s\": 18771,\n \"text\": \"29AjayKumar\"\n },\n {\n \"code\": null,\n \"e\": 18793,\n \"s\": 18783,\n \"text\": \"Rajput-Ji\"\n },\n {\n \"code\": null,\n \"e\": 18807,\n \"s\": 18793,\n \"text\": \"princiraj1992\"\n },\n {\n \"code\": null,\n \"e\": 18820,\n \"s\": 18807,\n \"text\": \"princi singh\"\n },\n {\n \"code\": null,\n \"e\": 18828,\n \"s\": 18820,\n \"text\": \"neo_700\"\n },\n {\n \"code\": null,\n \"e\": 18843,\n \"s\": 18828,\n \"text\": \"SHUBHAMSINGH10\"\n },\n {\n \"code\": null,\n \"e\": 18853,\n \"s\": 18843,\n \"text\": \"sanjoy_62\"\n },\n {\n \"code\": null,\n \"e\": 18859,\n \"s\": 18853,\n \"text\": \"itsok\"\n 
},\n {\n \"code\": null,\n \"e\": 18866,\n \"s\": 18859,\n \"text\": \"rrrtnx\"\n },\n {\n \"code\": null,\n \"e\": 18876,\n \"s\": 18866,\n \"text\": \"patel2127\"\n },\n {\n \"code\": null,\n \"e\": 18886,\n \"s\": 18876,\n \"text\": \"kk9826225\"\n },\n {\n \"code\": null,\n \"e\": 18899,\n \"s\": 18886,\n \"text\": \"simmytarika5\"\n },\n {\n \"code\": null,\n \"e\": 18916,\n \"s\": 18899,\n \"text\": \"surinderdawra388\"\n },\n {\n \"code\": null,\n \"e\": 18929,\n \"s\": 18916,\n \"text\": \"shinjanpatra\"\n },\n {\n \"code\": null,\n \"e\": 18940,\n \"s\": 18929,\n \"text\": \"singhh3010\"\n },\n {\n \"code\": null,\n \"e\": 18952,\n \"s\": 18940,\n \"text\": \"polymatir3j\"\n },\n {\n \"code\": null,\n \"e\": 18970,\n \"s\": 18952,\n \"text\": \"cpp-unordered_map\"\n },\n {\n \"code\": null,\n \"e\": 18989,\n \"s\": 18970,\n \"text\": \"frequency-counting\"\n },\n {\n \"code\": null,\n \"e\": 18996,\n \"s\": 18989,\n \"text\": \"Arrays\"\n },\n {\n \"code\": null,\n \"e\": 19001,\n \"s\": 18996,\n \"text\": \"Hash\"\n },\n {\n \"code\": null,\n \"e\": 19008,\n \"s\": 19001,\n \"text\": \"Arrays\"\n },\n {\n \"code\": null,\n \"e\": 19013,\n \"s\": 19008,\n \"text\": \"Hash\"\n },\n {\n \"code\": null,\n \"e\": 19111,\n \"s\": 19013,\n \"text\": \"Writing code in comment?\\nPlease use ide.geeksforgeeks.org,\\ngenerate link and share the link here.\"\n },\n {\n \"code\": null,\n \"e\": 19179,\n \"s\": 19111,\n \"text\": \"Maximum and minimum of an array using minimum number of comparisons\"\n },\n {\n \"code\": null,\n \"e\": 19223,\n \"s\": 19179,\n \"text\": \"Top 50 Array Coding Problems for Interviews\"\n },\n {\n \"code\": null,\n \"e\": 19255,\n \"s\": 19223,\n \"text\": \"Multidimensional Arrays in Java\"\n },\n {\n \"code\": null,\n \"e\": 19303,\n \"s\": 19255,\n \"text\": \"Stack Data Structure (Introduction and Program)\"\n },\n {\n \"code\": null,\n \"e\": 19317,\n \"s\": 19303,\n \"text\": \"Linear Search\"\n },\n {\n \"code\": null,\n \"e\": 19355,\n \"s\": 19317,\n \"text\": \"What is Hashing | A Complete Tutorial\"\n },\n {\n \"code\": null,\n \"e\": 19440,\n \"s\": 19355,\n \"text\": \"Given an array A[] and a number x, check for pair in A[] with sum as x (aka Two Sum)\"\n },\n {\n \"code\": null,\n \"e\": 19471,\n \"s\": 19440,\n \"text\": \"Hashing | Set 1 (Introduction)\"\n },\n {\n \"code\": null,\n \"e\": 19507,\n \"s\": 19471,\n \"text\": \"Internal Working of HashMap in Java\"\n }\n]"}}},{"rowIdx":32,"cells":{"title":{"kind":"string","value":"How to swap two bits in a given integer?"},"text":{"kind":"string","value":"07 Jun, 2022\nGiven an integer n and two-bit positions p1 and p2 inside it, swap bits at the given positions. The given positions are from the least significant bit (lsb). For example, the position for lsb is 0.Examples: \nInput: n = 28, p1 = 0, p2 = 3Output: 21Explaination: 28 in binary is 11100. 
If we swap 0’th and 3rd digits, we get 10101 which is 21 in decimal.\nInput: n = 20, p1 = 2, p2 = 3Output: 24\nWe strongly recommend you minimize your browser and try this yourself first.Method 1: The idea is to first find the bits, then use XOR based swapping concept, i..e., to swap two numbers ‘x’ and ‘y’, we do x = x ^ y, y = y ^ x, and x = x ^ y.\nBelow is the implementation of the above idea\nC++\nC\nJava\nC#\nJavascript\nPython3\n// C++ program to swap bits in an integer#includeusing namespace std; // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); cout<<\"Result = \"<< res<<\" \"; return 0;} // This code is contributed by pratham76.\n// C program to swap bits in an integer#include // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); printf(\"Result = %d \", res); return 0;}\n// Java program to swap bits in an integerimport java.io.*; class GFG{ // This function swaps bit at// positions p1 and p2 in an integer nstatic int swapBits( int n, int p1, int p2){ /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ int result = n ^ x; return result;} /* Driver code*/ public static void main (String[] args) { int res = swapBits(28, 0, 3); System.out.println (\"Result = \" + res); }} // This code is contributed by ajit..\n// C# program to swap bits in an integerusing System;class GFG{ // This function swaps bit at // positions p1 and p2 in an integer n static int swapBits( int n, int p1, int p2) { /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ int result = n ^ x; return result; } /* Driver code*/ public static void Main(string[] args) { int res = swapBits(28, 0, 3); Console.Write(\"Result = \" + res); }} // This code is contributed by rutvik_56.\n\n# Python3 program for the above approach # This function swaps bit at positions p1 and p2 in an integer ndef swapBits(n, p1, p2): # Move p1'th to rightmost side bit1 = (n >> p1) & 1 # Move p2'th to rightmost side bit2 = (n >> p2) & 1 # XOR the two bits x = 
(bit1 ^ bit2) # Put the xor bit back to their original positions x = (x << p1) | (x << p2) # XOR 'x' with the original number so that the # two sets are swapped result = n ^ x return result # Driver program to test above functionif __name__ == '__main__': res = swapBits(28, 0, 3) print(\"Result = \", res) # This code is contributed by nirajgusain5\nResult = 21 \nTime Complexity: O(1)Auxiliary Space: O(1) \nC++\nC\nJava\nPython\nC#\nJavascript\n//C++ code for swapping given bits of a number#includeusing namespace std;int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ cout << \"Result = \" << swapBits(28, 0, 3); return 0;}\n//C code for swapping given bits of a number#includeint swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ printf(\"Result = %d\", swapBits(28, 0, 3)); return 0;}\n// Java code for swapping// given bits of a numberimport java.util.*;class Main{ public static int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and // p2 times and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n;} // Driver codepublic static void main(String[] args){ System.out.print(\"Result = \" + swapBits(28, 0, 3));}} // This code is contributed by divyeshrabadiya07\n# Python code for swapping given bits of a numberdef swapBits(n, p1, p2): # left-shift 1 p1 and p2 times # and using XOR if ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2): n ^= 1 << p1 n ^= 1 << p2 return n # Driver Codeprint(\"Result =\",swapBits(28, 0, 3)) # This code is contributed by rag2127\n// C# code for swapping given bits of a numberusing System;class GFG { static int swapBits(int n, int p1, int p2) { // left-shift 1 p1 and p2 times // and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2))); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n; } // Driver code static void Main() { Console.WriteLine(\"Result = \" + swapBits(28, 0, 3)); }} // This code is contributed by divyesh072019\n\nResult = 21\nTime Complexity: O(1)Auxiliary Space: O(1)\nPlease write comments if you find anything incorrect, or if you want to share more information about the topic discussed above\njit_t\nSHUBHAMSINGH10\nnidhi_biet\nyashbeersingh42\ndivyeshrabadiya07\nrag2127\ndivyesh072019\nrutvik_56\npratham76\nsuresh07\nprinci singh\nnirajgusain5\nsubham348\nmohammad shuaib sidd\nharendrakumar123\nBit Magic\nBit Magic\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":54,"s":26,"text":"\n07 Jun, 2022"},{"code":null,"e":262,"s":54,"text":"Given an integer n and two-bit positions p1 and p2 inside it, swap bits at the given positions. The given positions are from the least significant bit (lsb). For example, the position for lsb is 0.Examples: "},{"code":null,"e":408,"s":262,"text":"Input: n = 28, p1 = 0, p2 = 3Output: 21Explaination: 28 in binary is 11100. 
If we swap 0’th and 3rd digits, we get 10101 which is 21 in decimal."},{"code":null,"e":448,"s":408,"text":"Input: n = 20, p1 = 2, p2 = 3Output: 24"},{"code":null,"e":691,"s":448,"text":"We strongly recommend you minimize your browser and try this yourself first.Method 1: The idea is to first find the bits, then use XOR based swapping concept, i..e., to swap two numbers ‘x’ and ‘y’, we do x = x ^ y, y = y ^ x, and x = x ^ y."},{"code":null,"e":737,"s":691,"text":"Below is the implementation of the above idea"},{"code":null,"e":741,"s":737,"text":"C++"},{"code":null,"e":743,"s":741,"text":"C"},{"code":null,"e":748,"s":743,"text":"Java"},{"code":null,"e":751,"s":748,"text":"C#"},{"code":null,"e":762,"s":751,"text":"Javascript"},{"code":null,"e":770,"s":762,"text":"Python3"},{"code":"// C++ program to swap bits in an integer#includeusing namespace std; // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); cout<<\"Result = \"<< res<<\" \"; return 0;} // This code is contributed by pratham76.","e":1582,"s":770,"text":null},{"code":"// C program to swap bits in an integer#include // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); printf(\"Result = %d \", res); return 0;}","e":2323,"s":1582,"text":null},{"code":"// Java program to swap bits in an integerimport java.io.*; class GFG{ // This function swaps bit at// positions p1 and p2 in an integer nstatic int swapBits( int n, int p1, int p2){ /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ int result = n ^ x; return result;} /* Driver code*/ public static void main (String[] args) { int res = swapBits(28, 0, 3); System.out.println (\"Result = \" + res); }} // This code is contributed by ajit..","e":3111,"s":2323,"text":null},{"code":"// C# program to swap bits in an integerusing System;class GFG{ // This function swaps bit at // positions p1 and p2 in an integer n static int swapBits( int n, int p1, int p2) { /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are 
swapped */ int result = n ^ x; return result; } /* Driver code*/ public static void Main(string[] args) { int res = swapBits(28, 0, 3); Console.Write(\"Result = \" + res); }} // This code is contributed by rutvik_56.","e":3880,"s":3111,"text":null},{"code":"","e":4576,"s":3880,"text":null},{"code":"# Python3 program for the above approach # This function swaps bit at positions p1 and p2 in an integer ndef swapBits(n, p1, p2): # Move p1'th to rightmost side bit1 = (n >> p1) & 1 # Move p2'th to rightmost side bit2 = (n >> p2) & 1 # XOR the two bits x = (bit1 ^ bit2) # Put the xor bit back to their original positions x = (x << p1) | (x << p2) # XOR 'x' with the original number so that the # two sets are swapped result = n ^ x return result # Driver program to test above functionif __name__ == '__main__': res = swapBits(28, 0, 3) print(\"Result = \", res) # This code is contributed by nirajgusain5","e":5229,"s":4576,"text":null},{"code":null,"e":5242,"s":5229,"text":"Result = 21 "},{"code":null,"e":5286,"s":5242,"text":"Time Complexity: O(1)Auxiliary Space: O(1) "},{"code":null,"e":5290,"s":5286,"text":"C++"},{"code":null,"e":5292,"s":5290,"text":"C"},{"code":null,"e":5297,"s":5292,"text":"Java"},{"code":null,"e":5304,"s":5297,"text":"Python"},{"code":null,"e":5307,"s":5304,"text":"C#"},{"code":null,"e":5318,"s":5307,"text":"Javascript"},{"code":"//C++ code for swapping given bits of a number#includeusing namespace std;int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ cout << \"Result = \" << swapBits(28, 0, 3); return 0;}","e":5683,"s":5318,"text":null},{"code":"//C code for swapping given bits of a number#includeint swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ printf(\"Result = %d\", swapBits(28, 0, 3)); return 0;}","e":6039,"s":5683,"text":null},{"code":"// Java code for swapping// given bits of a numberimport java.util.*;class Main{ public static int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and // p2 times and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n;} // Driver codepublic static void main(String[] args){ System.out.print(\"Result = \" + swapBits(28, 0, 3));}} // This code is contributed by divyeshrabadiya07","e":6585,"s":6039,"text":null},{"code":"# Python code for swapping given bits of a numberdef swapBits(n, p1, p2): # left-shift 1 p1 and p2 times # and using XOR if ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2): n ^= 1 << p1 n ^= 1 << p2 return n # Driver Codeprint(\"Result =\",swapBits(28, 0, 3)) # This code is contributed by rag2127","e":6906,"s":6585,"text":null},{"code":"// C# code for swapping given bits of a numberusing System;class GFG { static int swapBits(int n, int p1, int p2) { // left-shift 1 p1 and p2 times // and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2))); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n; } // Driver code static void Main() { Console.WriteLine(\"Result = \" + swapBits(28, 0, 3)); }} // This code is contributed by divyesh072019","e":7404,"s":6906,"text":null},{"code":"","e":7755,"s":7404,"text":null},{"code":null,"e":7767,"s":7755,"text":"Result = 21"},{"code":null,"e":7810,"s":7767,"text":"Time Complexity: 
O(1)Auxiliary Space: O(1)"},{"code":null,"e":7937,"s":7810,"text":"Please write comments if you find anything incorrect, or if you want to share more information about the topic discussed above"},{"code":null,"e":7943,"s":7937,"text":"jit_t"},{"code":null,"e":7958,"s":7943,"text":"SHUBHAMSINGH10"},{"code":null,"e":7969,"s":7958,"text":"nidhi_biet"},{"code":null,"e":7985,"s":7969,"text":"yashbeersingh42"},{"code":null,"e":8003,"s":7985,"text":"divyeshrabadiya07"},{"code":null,"e":8011,"s":8003,"text":"rag2127"},{"code":null,"e":8025,"s":8011,"text":"divyesh072019"},{"code":null,"e":8035,"s":8025,"text":"rutvik_56"},{"code":null,"e":8045,"s":8035,"text":"pratham76"},{"code":null,"e":8054,"s":8045,"text":"suresh07"},{"code":null,"e":8067,"s":8054,"text":"princi singh"},{"code":null,"e":8080,"s":8067,"text":"nirajgusain5"},{"code":null,"e":8090,"s":8080,"text":"subham348"},{"code":null,"e":8111,"s":8090,"text":"mohammad shuaib sidd"},{"code":null,"e":8128,"s":8111,"text":"harendrakumar123"},{"code":null,"e":8138,"s":8128,"text":"Bit Magic"},{"code":null,"e":8148,"s":8138,"text":"Bit Magic"}],"string":"[\n {\n \"code\": null,\n \"e\": 54,\n \"s\": 26,\n \"text\": \"\\n07 Jun, 2022\"\n },\n {\n \"code\": null,\n \"e\": 262,\n \"s\": 54,\n \"text\": \"Given an integer n and two-bit positions p1 and p2 inside it, swap bits at the given positions. The given positions are from the least significant bit (lsb). For example, the position for lsb is 0.Examples: \"\n },\n {\n \"code\": null,\n \"e\": 408,\n \"s\": 262,\n \"text\": \"Input: n = 28, p1 = 0, p2 = 3Output: 21Explaination: 28 in binary is 11100. If we swap 0’th and 3rd digits, we get 10101 which is 21 in decimal.\"\n },\n {\n \"code\": null,\n \"e\": 448,\n \"s\": 408,\n \"text\": \"Input: n = 20, p1 = 2, p2 = 3Output: 24\"\n },\n {\n \"code\": null,\n \"e\": 691,\n \"s\": 448,\n \"text\": \"We strongly recommend you minimize your browser and try this yourself first.Method 1: The idea is to first find the bits, then use XOR based swapping concept, i..e., to swap two numbers ‘x’ and ‘y’, we do x = x ^ y, y = y ^ x, and x = x ^ y.\"\n },\n {\n \"code\": null,\n \"e\": 737,\n \"s\": 691,\n \"text\": \"Below is the implementation of the above idea\"\n },\n {\n \"code\": null,\n \"e\": 741,\n \"s\": 737,\n \"text\": \"C++\"\n },\n {\n \"code\": null,\n \"e\": 743,\n \"s\": 741,\n \"text\": \"C\"\n },\n {\n \"code\": null,\n \"e\": 748,\n \"s\": 743,\n \"text\": \"Java\"\n },\n {\n \"code\": null,\n \"e\": 751,\n \"s\": 748,\n \"text\": \"C#\"\n },\n {\n \"code\": null,\n \"e\": 762,\n \"s\": 751,\n \"text\": \"Javascript\"\n },\n {\n \"code\": null,\n \"e\": 770,\n \"s\": 762,\n \"text\": \"Python3\"\n },\n {\n \"code\": \"// C++ program to swap bits in an integer#includeusing namespace std; // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); cout<<\\\"Result = \\\"<< res<<\\\" \\\"; return 0;} // This code is contributed by pratham76.\",\n \"e\": 1582,\n \"s\": 770,\n \"text\": null\n },\n {\n \"code\": \"// C 
program to swap bits in an integer#include // This function swaps bit at positions p1 and p2 in an integer nint swapBits(unsigned int n, unsigned int p1, unsigned int p2){ /* Move p1'th to rightmost side */ unsigned int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ unsigned int bit2 = (n >> p2) & 1; /* XOR the two bits */ unsigned int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ unsigned int result = n ^ x;} /* Driver program to test above function*/int main(){ int res = swapBits(28, 0, 3); printf(\\\"Result = %d \\\", res); return 0;}\",\n \"e\": 2323,\n \"s\": 1582,\n \"text\": null\n },\n {\n \"code\": \"// Java program to swap bits in an integerimport java.io.*; class GFG{ // This function swaps bit at// positions p1 and p2 in an integer nstatic int swapBits( int n, int p1, int p2){ /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ int result = n ^ x; return result;} /* Driver code*/ public static void main (String[] args) { int res = swapBits(28, 0, 3); System.out.println (\\\"Result = \\\" + res); }} // This code is contributed by ajit..\",\n \"e\": 3111,\n \"s\": 2323,\n \"text\": null\n },\n {\n \"code\": \"// C# program to swap bits in an integerusing System;class GFG{ // This function swaps bit at // positions p1 and p2 in an integer n static int swapBits( int n, int p1, int p2) { /* Move p1'th to rightmost side */ int bit1 = (n >> p1) & 1; /* Move p2'th to rightmost side */ int bit2 = (n >> p2) & 1; /* XOR the two bits */ int x = (bit1 ^ bit2); /* Put the xor bit back to their original positions */ x = (x << p1) | (x << p2); /* XOR 'x' with the original number so that the two sets are swapped */ int result = n ^ x; return result; } /* Driver code*/ public static void Main(string[] args) { int res = swapBits(28, 0, 3); Console.Write(\\\"Result = \\\" + res); }} // This code is contributed by rutvik_56.\",\n \"e\": 3880,\n \"s\": 3111,\n \"text\": null\n },\n {\n \"code\": \"\",\n \"e\": 4576,\n \"s\": 3880,\n \"text\": null\n },\n {\n \"code\": \"# Python3 program for the above approach # This function swaps bit at positions p1 and p2 in an integer ndef swapBits(n, p1, p2): # Move p1'th to rightmost side bit1 = (n >> p1) & 1 # Move p2'th to rightmost side bit2 = (n >> p2) & 1 # XOR the two bits x = (bit1 ^ bit2) # Put the xor bit back to their original positions x = (x << p1) | (x << p2) # XOR 'x' with the original number so that the # two sets are swapped result = n ^ x return result # Driver program to test above functionif __name__ == '__main__': res = swapBits(28, 0, 3) print(\\\"Result = \\\", res) # This code is contributed by nirajgusain5\",\n \"e\": 5229,\n \"s\": 4576,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 5242,\n \"s\": 5229,\n \"text\": \"Result = 21 \"\n },\n {\n \"code\": null,\n \"e\": 5286,\n \"s\": 5242,\n \"text\": \"Time Complexity: O(1)Auxiliary Space: O(1) \"\n },\n {\n \"code\": null,\n \"e\": 5290,\n \"s\": 5286,\n \"text\": \"C++\"\n },\n {\n \"code\": null,\n \"e\": 5292,\n \"s\": 5290,\n \"text\": \"C\"\n },\n {\n \"code\": null,\n \"e\": 5297,\n \"s\": 5292,\n \"text\": \"Java\"\n },\n {\n \"code\": null,\n \"e\": 5304,\n \"s\": 5297,\n 
\"text\": \"Python\"\n },\n {\n \"code\": null,\n \"e\": 5307,\n \"s\": 5304,\n \"text\": \"C#\"\n },\n {\n \"code\": null,\n \"e\": 5318,\n \"s\": 5307,\n \"text\": \"Javascript\"\n },\n {\n \"code\": \"//C++ code for swapping given bits of a number#includeusing namespace std;int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ cout << \\\"Result = \\\" << swapBits(28, 0, 3); return 0;}\",\n \"e\": 5683,\n \"s\": 5318,\n \"text\": null\n },\n {\n \"code\": \"//C code for swapping given bits of a number#includeint swapBits(int n, int p1, int p2){ //left-shift 1 p1 and p2 times //and using XOR if (((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2)) { n ^= 1 << p1; n ^= 1 << p2; } return n;} //Driver Codeint main(){ printf(\\\"Result = %d\\\", swapBits(28, 0, 3)); return 0;}\",\n \"e\": 6039,\n \"s\": 5683,\n \"text\": null\n },\n {\n \"code\": \"// Java code for swapping// given bits of a numberimport java.util.*;class Main{ public static int swapBits(int n, int p1, int p2){ //left-shift 1 p1 and // p2 times and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n;} // Driver codepublic static void main(String[] args){ System.out.print(\\\"Result = \\\" + swapBits(28, 0, 3));}} // This code is contributed by divyeshrabadiya07\",\n \"e\": 6585,\n \"s\": 6039,\n \"text\": null\n },\n {\n \"code\": \"# Python code for swapping given bits of a numberdef swapBits(n, p1, p2): # left-shift 1 p1 and p2 times # and using XOR if ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2)) >> p2): n ^= 1 << p1 n ^= 1 << p2 return n # Driver Codeprint(\\\"Result =\\\",swapBits(28, 0, 3)) # This code is contributed by rag2127\",\n \"e\": 6906,\n \"s\": 6585,\n \"text\": null\n },\n {\n \"code\": \"// C# code for swapping given bits of a numberusing System;class GFG { static int swapBits(int n, int p1, int p2) { // left-shift 1 p1 and p2 times // and using XOR int temp = ((n & (1 << p1)) >> p1) ^ ((n & (1 << p2))); if (temp >= 1) { n ^= 1 << p1; n ^= 1 << p2; } return n; } // Driver code static void Main() { Console.WriteLine(\\\"Result = \\\" + swapBits(28, 0, 3)); }} // This code is contributed by divyesh072019\",\n \"e\": 7404,\n \"s\": 6906,\n \"text\": null\n },\n {\n \"code\": \"\",\n \"e\": 7755,\n \"s\": 7404,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 7767,\n \"s\": 7755,\n \"text\": \"Result = 21\"\n },\n {\n \"code\": null,\n \"e\": 7810,\n \"s\": 7767,\n \"text\": \"Time Complexity: O(1)Auxiliary Space: O(1)\"\n },\n {\n \"code\": null,\n \"e\": 7937,\n \"s\": 7810,\n \"text\": \"Please write comments if you find anything incorrect, or if you want to share more information about the topic discussed above\"\n },\n {\n \"code\": null,\n \"e\": 7943,\n \"s\": 7937,\n \"text\": \"jit_t\"\n },\n {\n \"code\": null,\n \"e\": 7958,\n \"s\": 7943,\n \"text\": \"SHUBHAMSINGH10\"\n },\n {\n \"code\": null,\n \"e\": 7969,\n \"s\": 7958,\n \"text\": \"nidhi_biet\"\n },\n {\n \"code\": null,\n \"e\": 7985,\n \"s\": 7969,\n \"text\": \"yashbeersingh42\"\n },\n {\n \"code\": null,\n \"e\": 8003,\n \"s\": 7985,\n \"text\": \"divyeshrabadiya07\"\n },\n {\n \"code\": null,\n \"e\": 8011,\n \"s\": 8003,\n \"text\": \"rag2127\"\n },\n {\n \"code\": null,\n \"e\": 8025,\n \"s\": 8011,\n \"text\": \"divyesh072019\"\n },\n {\n \"code\": null,\n \"e\": 8035,\n \"s\": 8025,\n \"text\": \"rutvik_56\"\n 
},\n {\n \"code\": null,\n \"e\": 8045,\n \"s\": 8035,\n \"text\": \"pratham76\"\n },\n {\n \"code\": null,\n \"e\": 8054,\n \"s\": 8045,\n \"text\": \"suresh07\"\n },\n {\n \"code\": null,\n \"e\": 8067,\n \"s\": 8054,\n \"text\": \"princi singh\"\n },\n {\n \"code\": null,\n \"e\": 8080,\n \"s\": 8067,\n \"text\": \"nirajgusain5\"\n },\n {\n \"code\": null,\n \"e\": 8090,\n \"s\": 8080,\n \"text\": \"subham348\"\n },\n {\n \"code\": null,\n \"e\": 8111,\n \"s\": 8090,\n \"text\": \"mohammad shuaib sidd\"\n },\n {\n \"code\": null,\n \"e\": 8128,\n \"s\": 8111,\n \"text\": \"harendrakumar123\"\n },\n {\n \"code\": null,\n \"e\": 8138,\n \"s\": 8128,\n \"text\": \"Bit Magic\"\n },\n {\n \"code\": null,\n \"e\": 8148,\n \"s\": 8138,\n \"text\": \"Bit Magic\"\n }\n]"}}},{"rowIdx":33,"cells":{"title":{"kind":"string","value":"while loop in Julia"},"text":{"kind":"string","value":"19 Feb, 2020\nIn Julia, while loop is used to execute a block of statements repeatedly until a given condition is satisfied. And when the condition becomes false, the line immediately after the loop in the program is executed. If the condition is false when the while loop is executed first time, then the body of the loop will never be executed. Syntax :\nwhile expression\n\n statement(s)\n\nend\n\nHere, ‘while‘ is the keyword to start while loop, ‘expression‘ is the condition to be satisfied, and ‘end‘ is the keyword to end the while loop.\nNote: A block of code is the set of statements enclosed between the conditional statement and the ‘end‘ statement.\nExample 1:\n# Julia program to illustrate # the use of while loop # Declaring ArrayArray = [\"Geeks\", \"For\", \"Geeks\"] # Iterator Variablei = 1 # while loopwhile i <= length(Array) # Assigning value to object Object = Array[i] # Printing object println(\"$Object\") # Updating iterator globally global i += 1 # Ending Loopend\nOutput: Example 2:\n# Julia program to generate # the Fibonacci sequence # The length of Fibonacci sequencelength = 15 # The first two valuesa = 0b = 1 # Iterator Valueitr = 0 # while loop conditionwhile itr < length # Printing fibonacci value print(a, \", \") # Updating value c = a + b # Modify values global a = b global b = c # Updating iterator global itr += 1 # End of while loopend\nJulia-loops\nJulia\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":28,"s":0,"text":"\n19 Feb, 2020"},{"code":null,"e":371,"s":28,"text":"In Julia, while loop is used to execute a block of statements repeatedly until a given condition is satisfied. And when the condition becomes false, the line immediately after the loop in the program is executed. If the condition is false when the while loop is executed first time, then the body of the loop will never be executed. 
Syntax :"},{"code":null,"e":412,"s":371,"text":"while expression\n\n statement(s)\n\nend\n"},{"code":null,"e":557,"s":412,"text":"Here, ‘while‘ is the keyword to start while loop, ‘expression‘ is the condition to be satisfied, and ‘end‘ is the keyword to end the while loop."},{"code":null,"e":672,"s":557,"text":"Note: A block of code is the set of statements enclosed between the conditional statement and the ‘end‘ statement."},{"code":null,"e":683,"s":672,"text":"Example 1:"},{"code":"# Julia program to illustrate # the use of while loop # Declaring ArrayArray = [\"Geeks\", \"For\", \"Geeks\"] # Iterator Variablei = 1 # while loopwhile i <= length(Array) # Assigning value to object Object = Array[i] # Printing object println(\"$Object\") # Updating iterator globally global i += 1 # Ending Loopend","e":1033,"s":683,"text":null},{"code":null,"e":1052,"s":1033,"text":"Output: Example 2:"},{"code":"# Julia program to generate # the Fibonacci sequence # The length of Fibonacci sequencelength = 15 # The first two valuesa = 0b = 1 # Iterator Valueitr = 0 # while loop conditionwhile itr < length # Printing fibonacci value print(a, \", \") # Updating value c = a + b # Modify values global a = b global b = c # Updating iterator global itr += 1 # End of while loopend","e":1460,"s":1052,"text":null},{"code":null,"e":1472,"s":1460,"text":"Julia-loops"},{"code":null,"e":1478,"s":1472,"text":"Julia"}],"string":"[\n {\n \"code\": null,\n \"e\": 28,\n \"s\": 0,\n \"text\": \"\\n19 Feb, 2020\"\n },\n {\n \"code\": null,\n \"e\": 371,\n \"s\": 28,\n \"text\": \"In Julia, while loop is used to execute a block of statements repeatedly until a given condition is satisfied. And when the condition becomes false, the line immediately after the loop in the program is executed. If the condition is false when the while loop is executed first time, then the body of the loop will never be executed. 
Syntax :\"\n },\n {\n \"code\": null,\n \"e\": 412,\n \"s\": 371,\n \"text\": \"while expression\\n\\n statement(s)\\n\\nend\\n\"\n },\n {\n \"code\": null,\n \"e\": 557,\n \"s\": 412,\n \"text\": \"Here, ‘while‘ is the keyword to start while loop, ‘expression‘ is the condition to be satisfied, and ‘end‘ is the keyword to end the while loop.\"\n },\n {\n \"code\": null,\n \"e\": 672,\n \"s\": 557,\n \"text\": \"Note: A block of code is the set of statements enclosed between the conditional statement and the ‘end‘ statement.\"\n },\n {\n \"code\": null,\n \"e\": 683,\n \"s\": 672,\n \"text\": \"Example 1:\"\n },\n {\n \"code\": \"# Julia program to illustrate # the use of while loop # Declaring ArrayArray = [\\\"Geeks\\\", \\\"For\\\", \\\"Geeks\\\"] # Iterator Variablei = 1 # while loopwhile i <= length(Array) # Assigning value to object Object = Array[i] # Printing object println(\\\"$Object\\\") # Updating iterator globally global i += 1 # Ending Loopend\",\n \"e\": 1033,\n \"s\": 683,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 1052,\n \"s\": 1033,\n \"text\": \"Output: Example 2:\"\n },\n {\n \"code\": \"# Julia program to generate # the Fibonacci sequence # The length of Fibonacci sequencelength = 15 # The first two valuesa = 0b = 1 # Iterator Valueitr = 0 # while loop conditionwhile itr < length # Printing fibonacci value print(a, \\\", \\\") # Updating value c = a + b # Modify values global a = b global b = c # Updating iterator global itr += 1 # End of while loopend\",\n \"e\": 1460,\n \"s\": 1052,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 1472,\n \"s\": 1460,\n \"text\": \"Julia-loops\"\n },\n {\n \"code\": null,\n \"e\": 1478,\n \"s\": 1472,\n \"text\": \"Julia\"\n }\n]"}}},{"rowIdx":34,"cells":{"title":{"kind":"string","value":"HTML | vspace Attribute"},"text":{"kind":"string","value":"06 Jan, 2022\nThe HTML vspace Attribute is used to specify the number of whitespaces on bottom and top side of an image.\nNote: The HTML vspace Attribute not supported by HTML5\nSyntax:\n\nAttribute Values:\npixels: It specifies the number of whitespaces on top and bottom of an image in terms of pixels.\nExample:\n HTML img vspace Attribute

GeeksforGeeks

HTML vspace Attribute

Image without vspace Attribute

\"Submit\" It is a computer science portal for geeks

Image with vspace Attribute

\"Submit\" It is a computer science portal for geeks

\nOutput:\nSupported Browsers: The browser supported by HTML vspace Attribute are listed below:\nGoogle Chrome\nInternet Explorer\nFirefox\nSafari\nOpera\nManasChhabra2\nHTML-Attributes\nHTML\nWeb Technologies\nHTML\nWriting code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."},"parsed":{"kind":"list like","value":[{"code":null,"e":53,"s":25,"text":"\n06 Jan, 2022"},{"code":null,"e":166,"s":53,"text":"The HTML vspace Attribute is used to specify the number of whitespaces on bottom and top side of an image."},{"code":null,"e":222,"s":166,"text":"Note: The HTML vspace Attribute not supported by HTML5"},{"code":null,"e":230,"s":222,"text":"Syntax:"},{"code":null,"e":252,"s":230,"text":""},{"code":null,"e":270,"s":252,"text":"Attribute Values:"},{"code":null,"e":367,"s":270,"text":"pixels: It specifies the number of whitespaces on top and bottom of an image in terms of pixels."},{"code":null,"e":376,"s":367,"text":"Example:"},{"code":" HTML img vspace Attribute

GeeksforGeeks

HTML vspace Attribute

Image without vspace Attribute

\"Submit\" It is a computer science portal for geeks

Image with vspace Attribute

\"Submit\" It is a computer science portal for geeks

","e":1373,"s":376,"text":null},{"code":null,"e":1381,"s":1373,"text":"Output:"},{"code":null,"e":1472,"s":1381,"text":"Supported Browsers: The browser supported by HTML vspace Attribute are listed below:"},{"code":null,"e":1486,"s":1472,"text":"Google Chrome"},{"code":null,"e":1504,"s":1486,"text":"Internet Explorer"},{"code":null,"e":1512,"s":1504,"text":"Firefox"},{"code":null,"e":1519,"s":1512,"text":"Safari"},{"code":null,"e":1525,"s":1519,"text":"Opera"},{"code":null,"e":1539,"s":1525,"text":"ManasChhabra2"},{"code":null,"e":1555,"s":1539,"text":"HTML-Attributes"},{"code":null,"e":1560,"s":1555,"text":"HTML"},{"code":null,"e":1577,"s":1560,"text":"Web Technologies"},{"code":null,"e":1582,"s":1577,"text":"HTML"}],"string":"[\n {\n \"code\": null,\n \"e\": 53,\n \"s\": 25,\n \"text\": \"\\n06 Jan, 2022\"\n },\n {\n \"code\": null,\n \"e\": 166,\n \"s\": 53,\n \"text\": \"The HTML vspace Attribute is used to specify the number of whitespaces on bottom and top side of an image.\"\n },\n {\n \"code\": null,\n \"e\": 222,\n \"s\": 166,\n \"text\": \"Note: The HTML vspace Attribute not supported by HTML5\"\n },\n {\n \"code\": null,\n \"e\": 230,\n \"s\": 222,\n \"text\": \"Syntax:\"\n },\n {\n \"code\": null,\n \"e\": 252,\n \"s\": 230,\n \"text\": \"\"\n },\n {\n \"code\": null,\n \"e\": 270,\n \"s\": 252,\n \"text\": \"Attribute Values:\"\n },\n {\n \"code\": null,\n \"e\": 367,\n \"s\": 270,\n \"text\": \"pixels: It specifies the number of whitespaces on top and bottom of an image in terms of pixels.\"\n },\n {\n \"code\": null,\n \"e\": 376,\n \"s\": 367,\n \"text\": \"Example:\"\n },\n {\n \"code\": \" HTML img vspace Attribute

GeeksforGeeks

HTML vspace Attribute

Image without vspace Attribute

\\\"Submit\\\" It is a computer science portal for geeks

Image with vspace Attribute

\\\"Submit\\\" It is a computer science portal for geeks

\",\n \"e\": 1373,\n \"s\": 376,\n \"text\": null\n },\n {\n \"code\": null,\n \"e\": 1381,\n \"s\": 1373,\n \"text\": \"Output:\"\n },\n {\n \"code\": null,\n \"e\": 1472,\n \"s\": 1381,\n \"text\": \"Supported Browsers: The browser supported by HTML vspace Attribute are listed below:\"\n },\n {\n \"code\": null,\n \"e\": 1486,\n \"s\": 1472,\n \"text\": \"Google Chrome\"\n },\n {\n \"code\": null,\n \"e\": 1504,\n \"s\": 1486,\n \"text\": \"Internet Explorer\"\n },\n {\n \"code\": null,\n \"e\": 1512,\n \"s\": 1504,\n \"text\": \"Firefox\"\n },\n {\n \"code\": null,\n \"e\": 1519,\n \"s\": 1512,\n \"text\": \"Safari\"\n },\n {\n \"code\": null,\n \"e\": 1525,\n \"s\": 1519,\n \"text\": \"Opera\"\n },\n {\n \"code\": null,\n \"e\": 1539,\n \"s\": 1525,\n \"text\": \"ManasChhabra2\"\n },\n {\n \"code\": null,\n \"e\": 1555,\n \"s\": 1539,\n \"text\": \"HTML-Attributes\"\n },\n {\n \"code\": null,\n \"e\": 1560,\n \"s\": 1555,\n \"text\": \"HTML\"\n },\n {\n \"code\": null,\n \"e\": 1577,\n \"s\": 1560,\n \"text\": \"Web Technologies\"\n },\n {\n \"code\": null,\n \"e\": 1582,\n \"s\": 1577,\n \"text\": \"HTML\"\n }\n]"}}},{"rowIdx":35,"cells":{"title":{"kind":"string","value":"Predicting Amazon review scores using Hierarchical Attention Networks with PyTorch and Apache Mxnet | by Javier Rodriguez Zaurin | Towards Data Science"},"text":{"kind":"string","value":"This post and the code here are part of a larger repo that I have (very creatively) called “NLP-stuff”. As the name indicates, I include in that repo projects that I do and/or ideas that I have — as long as there is code associated with those ideas — that are related to NLP. In every directory, I have included a README file and a series of explanatory notebooks that I hope help explaining the code. I intend to keep adding projects throughout 2020, not necessarily the latest and/or most popular releases, but simply papers or algorithms I find interesting and useful. In particular, the code related to this post is in the directory amazon_reviews_classification_HAN.\nFirst things first, let’s start by acknowledging the relevant people that did the hard work. This post and the companion repo are based on the paper “Hierarchical Attention Networks for Document Classification” (Zichao Yang, et al, 2016). In addition, I have also used in my implementation the results, and code, presented in “Regularizing and Optimizing LSTM Language Models” (Stephen Merity, Nitish Shirish Keskar and Richard Socher, 2017). The dataset that I have used for this and other experiments in the repo is the Amazon product data (J. McAuley et al., 2015 and R. He, J. McAuley 2016), in particular the Clothing, Shoes and Jewellery dataset. I strongly recommend having a look at these papers and references therein.\nOnce that is done let’s start by describing the network architecture we will be implementing here. The following figure is Figure 2 in the Zichao Yang et al, paper.\nWe consider a document comprised of L sentences si and each sentence contains Ti words. w_it with t ∈ [1, T], represents the words in the i-th sentence. As shown in the figure, the authors used a word encoder (a bidirectional GRU, Bahdanau et al., 2014), along with a word attention mechanism to encode each sentence into a vector representation. These sentence representations are passed through a sentence encoder with a sentence attention mechanism resulting in a document vector representation. 
This final representation is passed to a fully connected layer with the corresponding activation function for prediction. The word “hierarchical” refers here to the process of first encoding sentences from words, and then documents from sentences, naturally following the “semantic hierarchy” in a document.

1.1 The Attention Mechanism

Assuming one is familiar with the GRU formulation (if not, have a look here), all the math one needs to understand the attention mechanism is included below. The mathematical expressions I include here refer to the word attention mechanism. The sentence attention mechanism is identical, but at sentence level. Therefore, I believe explaining the following expressions, along with the code snippets below, will be enough to understand the full process. The first 3 expressions are pretty standard:

x_it = W_e w_it , t ∈ [1, T]
h_it(f) = GRU_f(x_it) , t ∈ [1, T]
h_it(b) = GRU_b(x_it) , t ∈ [T, 1]

Where x_it is the word embedding vector of word t in sentence i. The vectors h_it(f) and h_it(b) are the forward and backward output features from the bidirectional GRU, which are concatenated before applying attention, i.e. h_it = [h_it(f) ; h_it(b)]. The attention mechanism is formulated as follows:

u_it = tanh(W_w h_it + b_w)
α_it = exp(u_itᵀ u_w) / Σ_t exp(u_itᵀ u_w)
s_i = Σ_t α_it h_it

First, the h_it features go through a one-layer MLP with a hyperbolic tangent function. This results in u_it, a hidden representation of h_it. Then, the importance of each word is measured as the dot product between u_it and a context vector u_w, obtaining a so-called normalised importance weight α_it. After that, the sentence vector s_i is computed as the weighted sum of the h_it features based on the normalised importance weights. For more details, please read the paper, section 2.2 “Hierarchical Attention”. As mentioned earlier, the sentence attention mechanism is identical, but at sentence level.

Word and sentence attention can be coded almost line by line from these expressions. In the attention module shown below (and in the repo), inp refers to h_it and h_i for word and sentence attention respectively. Comparing the two implementations in the repo, one can see that the Mxnet implementation is nearly identical to that in Pytorch, albeit with some subtle differences. This is going to be the case throughout the whole HAN implementation. However, I would like to add a few lines to clarify the following: this is my second “serious” dive into Mxnet and Gluon. The more I use it, the more I like it, but I am pretty sure that I could have written better, more efficient code. With that in mind, if you, the reader, are a Mxnet user and have suggestions and comments, I would love to hear them.

1.1.1 Word Encoder + Word Attention

Once we have the AttentionWithContext class, coding WordAttnNet (Word Encoder + Word Attention) is straightforward. The snippet below is a simplified version that contains the main components. For the full Pytorch and Mxnet versions, please have a look at the code in the repo.
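A minimal Pytorch sketch of both classes, written to match the description in this section: it omits the dropout machinery discussed in Section 2 and assumes batch-first tensors. Names like attn, contx and the padding_idx=1 default (the padding index used by the fastai tokenizer mentioned in Section 3.2) are my choices here, so the exact signatures in the repo may differ slightly.

```python
import torch
import torch.nn.functional as F
from torch import nn


class AttentionWithContext(nn.Module):
    def __init__(self, hidden_dim: int):
        super().__init__()
        # one-layer MLP: u_it = tanh(W_w h_it + b_w)
        self.attn = nn.Linear(hidden_dim, hidden_dim)
        # context vector u_w, learned during training
        self.contx = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, inp):
        # inp (h_it or h_i): (bsz, seq_len, hidden_dim)
        u = torch.tanh(self.attn(inp))
        # normalised importance weights alpha: (bsz, seq_len, 1)
        a = F.softmax(self.contx(u), dim=1)
        # weighted sum of the input features: (bsz, hidden_dim)
        s = (a * inp).sum(1)
        return a.permute(0, 2, 1), s


class WordAttnNet(nn.Module):
    def __init__(self, vocab_size: int, hidden_dim: int = 64,
                 embed_dim: int = 100, padding_idx: int = 1):
        super().__init__()
        self.word_embed = nn.Embedding(vocab_size, embed_dim,
                                       padding_idx=padding_idx)
        self.rnn = nn.GRU(embed_dim, hidden_dim, bidirectional=True,
                          batch_first=True)
        self.word_attn = AttentionWithContext(hidden_dim * 2)

    def forward(self, X, h_n):
        # X: (bsz, maxlen_sent), the i-th sentence of every review in the batch
        embed = self.word_embed(X.long())
        # h_t: (bsz, maxlen_sent, hidden_dim * 2)
        h_t, h_n = self.rnn(embed, h_n)
        # a: word importance weights, s: sentence representation s_i
        a, s = self.word_attn(h_t)
        return a, s.unsqueeze(1), h_n
```

Note that the attention module returns the normalised weights along with the weighted sum; keeping the weights around is what makes the visualisations in Section 3.2 possible.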
You will notice in the full version the presence of 3 dropout-related parameters: embed_drop, weight_drop and locked_drop. I will describe them in detail in Section 2. For the time being, let’s ignore them and focus on the remaining components of the module.

Simply, the input tokens (X) go through the embeddings lookup table (word_embed). The resulting token embeddings go through the bidirectional GRU (rnn) and the output of the GRU goes to AttentionWithContext (word_attn), which will return the importance weights (α), the sentence representation (s) and the hidden state h_n.

Note that returning the hidden state is necessary since the document (the Amazon review here) is comprised of a series of sentences. Therefore, the initial hidden state of sentence i+1 will be the last hidden state of sentence i. We could say that we will treat the documents themselves as “stateful”. I will come back to this later in the post.

1.1.2 Sentence Encoder + Sentence Attention

Given the fact that we do not need an embedding lookup table for the sentence encoder, SentAttnNet (Sentence Encoder + Sentence Attention) is even simpler. Here, the network will receive the output of WordAttnNet (X), which will then go through the bidirectional GRU (rnn) and then through AttentionWithContext (sent_attn).

At this point, we have all the building blocks to code the HAN.

1.1.3 Hierarchical Attention Networks (HANs)
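Again as a minimal sketch, reusing the AttentionWithContext and WordAttnNet classes defined above (the full Pytorch and Mxnet versions are in the repo). The default num_class=4 anticipates the 4 review-score classes described in Section 3.2 and is, like the other defaults, my choice for illustration.

```python
class SentAttnNet(nn.Module):
    def __init__(self, word_hidden_dim: int = 64, sent_hidden_dim: int = 64):
        super().__init__()
        self.rnn = nn.GRU(word_hidden_dim * 2, sent_hidden_dim,
                          bidirectional=True, batch_first=True)
        self.sent_attn = AttentionWithContext(sent_hidden_dim * 2)

    def forward(self, X):
        # X: (bsz, maxlen_doc, word_hidden_dim * 2), the sentence vectors s_i
        h_t, _ = self.rnn(X)
        # a: sentence importance weights, v: document representation
        a, v = self.sent_attn(h_t)
        return a, v


class HierAttnNet(nn.Module):
    def __init__(self, vocab_size: int, word_hidden_dim: int = 64,
                 sent_hidden_dim: int = 64, embed_dim: int = 100,
                 num_class: int = 4, padding_idx: int = 1):
        super().__init__()
        self.word_hidden_dim = word_hidden_dim
        self.wordattnnet = WordAttnNet(vocab_size, word_hidden_dim,
                                       embed_dim, padding_idx)
        self.sentattnnet = SentAttnNet(word_hidden_dim, sent_hidden_dim)
        self.fc = nn.Linear(sent_hidden_dim * 2, num_class)

    def forward(self, X):
        # X: (bsz, maxlen_doc, maxlen_sent) -> (maxlen_doc, bsz, maxlen_sent)
        x = X.permute(1, 0, 2)
        # initial hidden state for the word-level GRU (2 directions)
        word_h_n = torch.zeros(2, X.shape[0], self.word_hidden_dim,
                               device=X.device)
        sent_list = []
        for sent in x:
            # the last hidden state of sentence i is the initial hidden
            # state of sentence i+1: documents are treated as "stateful".
            # The full version also keeps word_a for the plots in Section 3.2
            word_a, word_s, word_h_n = self.wordattnnet(sent, word_h_n)
            sent_list.append(word_s)
        # (bsz, maxlen_doc, word_hidden_dim * 2)
        doc = torch.cat(sent_list, dim=1)
        sent_a, v = self.sentattnnet(doc)
        # logits; the Softmax is applied with the loss (e.g. F.cross_entropy)
        return self.fc(v)
```

With these defaults, HierAttnNet(vocab_size=1000)(torch.randint(0, 1000, (32, 5, 20))) returns a tensor of dim (32, 4), consistent with the walkthrough that follows.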
I believe it might be useful here to illustrate the flow of the data through the network with some numbers related to the dimensions of the tensors as they navigate the network. Let’s assume we use batch sizes (bsz) of 32, token embeddings of dim (embed_dim) 100 and GRUs with hidden size (hidden_dim) 64.

The input to HierAttnNet in the snippet above, X, is a tensor of dim (bsz, maxlen_doc, maxlen_sent), where maxlen_doc and maxlen_sent are the maximum number of sentences per document and words per sentence. Let’s assume that these numbers are 5 and 20. Therefore, X is here a tensor of dim (32, 5, 20).

The first thing we do is to permute the axes, resulting in a tensor of dim (5, 32, 20). This is because we are going to process one sentence at a time, feeding the last hidden state of one sentence as the initial hidden state of the next sentence, in a “stateful” manner. This will happen within the loop in the forward pass.

In that loop, we are going to process one sentence at a time, i.e. a tensor of dim (32, 20) containing the i-th sentence for all 32 reviews in the batch. This tensor is then passed to wordattnnet, which is simply Word Encoder + Word Attention as described before. There, it will first go through the embeddings layer, resulting in a tensor of dim (32, 20, 100). Then through the bidirectional GRU, resulting in a tensor of dim (32, 20, 128), and finally through the attention mechanism, resulting in a tensor of dim (32, 1, 128). This last tensor is s_i in equation 7 in the Zichao Yang et al paper, and corresponds to the i-th sentence vector representation.

After running the loop we will have maxlen_doc (i.e. 5) tensors of dim (32, 1, 128), which will be concatenated along the 2nd dimension, resulting in a tensor of dim (32, 5, 128) → (bsz, maxlen_doc, hidden_dim*2). This tensor is then passed through sentattnnet, which is simply Sentence Encoder + Sentence Attention as described before. There it will first go through the bidirectional GRU, resulting in a tensor of dim (32, 5, 128), and finally through the attention mechanism, resulting in a tensor of dim (32, 128). This last tensor will be v in equation 10 in their paper.

Finally, v is then passed through a fully connected layer and a Softmax function for prediction.

When I started to run experiments I noticed that the model overfitted quite early during training. The best validation loss and accuracy happened within the first couple of epochs, or even after the first epoch. When overfitting occurs there are a number of options:

Reduce model complexity: I explored this by running a number of models with smaller embedding dimensions and/or hidden sizes.

Early Stopping: this is always used, via an early_stop function.

Additional regularisation, such as dropout, label smoothing (Christian Szegedy et al, 2015) or data augmentation. I write “additional” because I already used weight decay.

I have not explored label smoothing or data augmentation in this exercise. If you want to dig a bit more into how to implement label smoothing in Pytorch, have a look at this repo. In the case of Mxnet, the gluonnlp API has its own LabelSmoothing class.

Regarding data augmentation, the truth is that I have not tried it here, and perhaps I should. Not only because it normally leads to notable improvements in terms of model generalisation, but moreover because I already have most of the code from another experiment where I implemented EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks (Jason Wei and Kai Zou, 2019). Nonetheless, one has to stop somewhere, and I decided to focus on exploring different dropout mechanisms.

The 3 different forms of dropout I used here are: embedding dropout, locked dropout and weight dropout. The code that I used is taken directly from the salesforce repo corresponding to the implementation of the AWD-LSTM (Merity, Shirish Keskar and Socher, 2017). In this section, I will focus on discussing the Pytorch implementation, but I will also include information regarding Mxnet’s implementation. Note that these dropout mechanisms were initially conceived and implemented in the context of language models. However, there is no reason why they should not work here (or at least no reason why we should not try them).

2.1 Embedding Dropout

This is discussed in detail in Section 4.3 of the Merity et al paper, and is based on the work of Gal & Ghahramani (2016). No one better than the authors themselves to explain it. In their own words: “This is equivalent to performing dropout on the embedding matrix at a word level, where the dropout is broadcast across all the word vector’s embedding. [...]”

In code (the code below is a simplified version of that in the original repo):
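This is a minimal sketch of the embedding dropout function; the original salesforce implementation also handles a scale argument and the remaining nn.Embedding options, which I omit here.

```python
def embedded_dropout(embed: nn.Embedding, words: torch.Tensor,
                     dropout: float = 0.1) -> torch.Tensor:
    if dropout:
        # mask of 0s and 1s along the 1st ("word") dimension of the
        # embedding matrix, expanded along the 2nd ("embedding")
        # dimension, and scaled by 1 / (1 - dropout)
        mask = (embed.weight.data.new_empty((embed.weight.size(0), 1))
                .bernoulli_(1 - dropout).expand_as(embed.weight)
                / (1 - dropout))
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    return F.embedding(words, masked_embed_weight, embed.padding_idx)
```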
Basically, we create a mask of 0s and 1s along the 1st dimension of the embeddings tensor (the “word” dimension), and then we expand that mask along the second dimension (the “embedding” dimension), scaling the remaining weights accordingly. As the authors said, we drop words.

2.2 Locked Dropout

This is also based on the work of Gal & Ghahramani (2016). Again, in the words of the authors: “[...] sample a binary dropout mask only once upon the first call and then to repeatedly use that locked dropout mask for all repeated connections within the forward and backward pass”.

In code:
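Again a minimal sketch, simplified from the original salesforce implementation and assuming, as everywhere in this post, batch-first tensors.

```python
class LockedDropout(nn.Module):
    def forward(self, x: torch.Tensor, dropout: float = 0.5) -> torch.Tensor:
        if not self.training or not dropout:
            return x
        # sample one binary mask of dim (1, x.size(1), x.size(2)) and
        # broadcast it along the first dimension of x
        mask = (x.data.new_empty((1, x.size(1), x.size(2)))
                .bernoulli_(1 - dropout) / (1 - dropout))
        return mask.expand_as(x) * x
```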
As far as implementation goes, this is it. Time to run some experiments.

3.1 Results

I eventually recorded 59 experiments (I ran a few more), 40 of them using the Pytorch implementation and 19 using Mxnet. Throughout the experiments I used different batch sizes, learning rates, embedding dimensions, GRU hidden sizes, dropout rates, learning rate schedulers, optimisers, etc. They are all shown in Tables 1 and 2 in the notebook 04_Review_Score_Prediction_Results.ipynb. The best results on the test dataset for each implementation are shown in the table below, along with the best result I obtained from previous attempts using tf-idf along with LightGBM and Hyperopt for the classification and hyper-parameter optimisation tasks.

In the first place, it is worth reiterating that I only ran 19 experiments with the Mxnet implementation. This is partly because, as I mentioned earlier in the post, I have more experience with Pytorch than with Mxnet and Gluon, which influenced the corresponding experimentation. Therefore, it is quite possible that I missed a minor tweak to the Mxnet models that would have led to better results than those in the table.

Other than that, we can see that the HAN-Pytorch model performs better than a thoroughly tuned tf-idf+LightGBM model on the test dataset on all metrics: accuracy, F1 score and precision. Therefore, the next immediate question most will be asking is: is it worth using HAN over tf-idf+LightGBM (or your favourite classifier)? And the answer is, as with most things in life, “it depends”.

It is true that HANs perform better, but the increase is relatively small. In general, leaving aside the particular case of the Amazon reviews, if in your business a ~3% improvement in F1 score is important (i.e. leads to a sizeable increase in revenue, savings or some other benefit), then there is no question: one would use the DL approach. On top of that, attention mechanisms might give you some additional, useful information (such as the expressions within the text that lead to a certain classification) beyond just the keywords that one would obtain with approaches such as tf-idf (or topic modelling).

Finally, my implementation of HANs is inefficient (see next section). Even so, the results presented in the table are always obtained in less than 10 epochs, and each epoch runs in around 3 min (or less, depending on the batch size) on a Tesla K80. Therefore, this is certainly not a computationally expensive algorithm to train, and it performs well. In summary, I’d say that HANs are a good algorithm to have in your repertoire when it comes to performing text classification.

3.2 Visualising Attention

Let’s now have a look at the attention weights, in particular the word and sentence importance weights (α).

Figure 2 shows both word and sentence attention weights for two reviews that were classified correctly. The xxmaj token is a special token introduced by the fastai tokenizer to indicate that the next token starts with a capital letter. In addition, it is worth mentioning that in the original dataset review scores range from 1 to 5 stars. During preprocessing, I merge reviews with 1 and 2 stars into one class and re-label the classes to start from 0 (see here for details). Therefore, the final number of classes is 4: {0, 1, 2, 3}.

The figure shows how, when predicting the review score, the HAN places attention on phrases and constructions like “fit was perfect”, “very superior” or “rubs [...] wrong places”, as well as on isolated words like “bought” or “not”. In addition, we can see that in the top plot a bit more attention is placed on the 3rd sentence relative to the other 3.

Figure 3 shows both word and sentence attention weights for two reviews that were misclassified. The top review was predicted as 0 while the true score was 3 (the real score in the original dataset is 5). Someone found those boots “yuck”, “disappointing” and “bad”, yet gave them a 5-star score. The review at the bottom was predicted as 3 while the true score was 0 (the real score in the original dataset is 1). It is easy to understand why the HAN misclassified this review, mostly based on the first sentence, where it places the highest attention.

Nonetheless, the figures show that the attention mechanism works well, capturing the relevant pieces in the reviews that lead to a certain classification. Notebook 05_Visualizing_Attention.ipynb contains the code that I used to generate these plots.

At this stage, there are a few comments worth making. First of all, I ran all the experiments manually (with a bash file), which is not the best way of optimising the hyper-parameters of the model. Ideally, one would wrap the train and validation processes in an objective function and use Hyperopt, as I did with all the other experiments in the repo that focus on text classification. I will include a .py script to do that in the near future.

On the other hand, looking at figures 2 and 3, one can see that attention is normally focused on isolated words or on constructions or phrases of 2 or 3 words. Therefore, one might think that using a non-DL approach along with n-grams might improve the results in the table. I actually did that in this notebook, and the difference between using or not using n-grams (in particular bigrams via gensim.models.phrases) is negligible.
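For reference, adding bigrams with gensim can be sketched as follows; the example reviews and the min_count and threshold values are just illustrative:

from gensim.models.phrases import Phrases, Phraser

# tokenised reviews, e.g. as produced during preprocessing
tokenized_reviews = [["fit", "was", "perfect"], ["rubs", "in", "wrong", "places"]]
bigram = Phraser(Phrases(tokenized_reviews, min_count=5, threshold=10))
# frequent pairs are joined with '_', e.g. ['wrong', 'places'] -> ['wrong_places']
reviews_with_bigrams = [bigram[review] for review in tokenized_reviews]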
Other issues worth discussing relate to model generalisation and efficiency. For example, I already mentioned that one could use label smoothing and data augmentation to add regularisation. In fact, even after adding some dropout, the best validation loss and metrics are still obtained early during training, especially in the case of the Mxnet implementation. This is not necessarily bad and might simply reflect the fact that the model reaches its best performance after just a few epochs. However, more exploration is required.

In addition, if you have a look at the details of my implementation, you will realise that the input tensors carry a lot of unnecessary padding. Nothing will be learned from this padding, but it still has to be processed, i.e. it is inefficient for the GPU. To remedy this situation, one could group reviews of similar lengths into buckets and pad accordingly, reducing the computation required to process the documents. Furthermore, one could adjust both the learning rate and the batch size according to the document length. All these approaches have already been used to build language models (e.g. see this presentation) and are readily available in the gluonnlp API (see the sketch below). At this point, I have only scratched the surface of what this API can do, and I am looking forward to more experimentation in the near future.
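As a rough illustration of the bucketing idea, gluonnlp’s FixedBucketSampler groups samples of similar length so that each batch only needs to be padded up to the length of its own bucket. The variable names and values below are hypothetical:

from gluonnlp.data import FixedBucketSampler

# number of tokens per review; in practice computed from the corpus
lengths = [12, 87, 45, 33, 120, 15]
sampler = FixedBucketSampler(lengths, batch_size=2, num_buckets=3, shuffle=True)
for batch_indices in sampler:
    # fetch these reviews and pad them only up to their bucket's length
    print(batch_indices)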
I have implemented “Hierarchical Attention Networks for Document Classification” (Zichao Yang et al., 2016) using Pytorch and Mxnet to predict Amazon review scores, and compared the results with those of previous implementations that did not involve Deep Learning. HANs perform better across all the evaluation metrics, are relatively easy to implement and are fast to train. Therefore, I believe this is an algorithm worth having in the repertoire for text classification tasks.

Other than that, and as always, I hope you found this post useful.

Any comments or suggestions, please email me at jrzaurin@gmail.com or, even better, open an issue in the repo.

References

Dzmitry Bahdanau, KyungHyun Cho, Yoshua Bengio 2016. Neural Machine Translation by Jointly Learning to Align and Translate. https://arxiv.org/abs/1409.0473

Yarin Gal, Zoubin Ghahramani 2015. A Theoretically Grounded Application of Dropout in Recurrent Neural Networks. https://arxiv.org/abs/1512.05287

Ruining He, Julian McAuley 2016. Ups and Downs: Modeling the Visual Evolution of Fashion Trends with One-Class Collaborative Filtering. https://arxiv.org/abs/1602.01585

Julian McAuley, Christopher Targett, Qinfeng (‘Javen’) Shi, and Anton van den Hengel 2015. Image-based Recommendations on Styles and Substitutes. https://arxiv.org/abs/1506.04757

Stephen Merity, Nitish Shirish Keskar, Richard Socher 2017. Regularizing and Optimizing LSTM Language Models. https://arxiv.org/abs/1708.02182

Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna 2015. Rethinking the Inception Architecture for Computer Vision. https://arxiv.org/abs/1512.00567

Li Wan, Matthew Zeiler, Sixin Zhang, Yann LeCun, Rob Fergus 2013. Regularization of Neural Networks using DropConnect. http://proceedings.mlr.press/v28/wan13.html

Jason Wei, Kai Zou 2019. EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks. https://arxiv.org/abs/1901.11196

Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, Eduard Hovy 2016. Hierarchical Attention Networks for Document Classification. https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf
The attention mechanism is formulated as follows:"},{"code":null,"e":3929,"s":3322,"text":"First, the h_it features go through a one-layer MLP with a hyperbolic tangent function. This results in a hidden representation of h_it, u_it. Then, the importance of each word is measured as the dot product between u_it and a context vector u_w, obtaining a so-called normalised importance weight α_it. After that, the sentence vector si is computed as the weighted sum of the h_it features based on the normalised importance weights. For more details, please, read the paper, section 2.2 “Hierarchical Attention”. As mentioned earlier, the sentence attention mechanism is identical but at sentence level."},{"code":null,"e":3974,"s":3929,"text":"Word and sentence attention can be coded as:"},{"code":null,"e":3983,"s":3974,"text":"Pytorch:"},{"code":null,"e":3990,"s":3983,"text":"Mxnet:"},{"code":null,"e":4069,"s":3990,"text":"where inp refers to h_it and h_i for word and sentence attention respectively."},{"code":null,"e":4614,"s":4069,"text":"As one can see, the Mxnet implementation is nearly identical to that in Pytorch, albeit with some subtle differences. This is going to be the case throughout the whole HAN implementation. However, I would like to add a few lines to clarify the following: this is my second “serious” dive into Mxnet and Gluon. The more I use it, the more I like it, but I am pretty sure that I could have written a better, more efficient code. With that in mind, if you, the reader, are a Mxnet user and have suggestions and comments, I would love to hear them."},{"code":null,"e":4650,"s":4614,"text":"1.1.1 Word Encoder + Word Attention"},{"code":null,"e":4929,"s":4650,"text":"Once we have the AttentionWithContext class, coding WordAttnNet (Word Encoder + Word Attention) is straightforward. The snippet below is a simplified version of that in the repo, but contains the main components. For the full version, please have a look at the code in the repo."},{"code":null,"e":4937,"s":4929,"text":"Pytorch"},{"code":null,"e":4943,"s":4937,"text":"Mxnet"},{"code":null,"e":5184,"s":4943,"text":"You will notice the presence of 3 dropout related parameters: embed_drop , weight_drop and locked_drop . I will describe them in detail in Section 2. For the time being, let’s ignore them and focus on the remaining components of the module."},{"code":null,"e":5513,"s":5184,"text":"Simply, the input tokens ( X ) go through the embeddings lookup table ( word_embed). The resulting token embeddings go through the bidirectional GRU ( rnn) and the output of the GRU goes to AttentionWithContext ( word_attn ) which will return the importance weights (α), the sentence representation (s) and the hidden state h_n."},{"code":null,"e":5859,"s":5513,"text":"Note that returning the hidden state is necessary since the document (the amazon review here) is comprised of a series of sentences. Therefore, the initial hidden state of sentence i+1 will be the last hidden state of sentence i. We could say that we will treat the documents themselves as “stateful”. 
I will come back to this later in the post."},{"code":null,"e":5903,"s":5859,"text":"1.1.2 Sentence Encoder + Sentence Attention"},{"code":null,"e":6053,"s":5903,"text":"Given the fact that we do not need an embedding lookup table for the sentence encoder, SentAttnNet (Sentence Encoder + Sentence Attention) is simply:"},{"code":null,"e":6061,"s":6053,"text":"Pytorch"},{"code":null,"e":6067,"s":6061,"text":"Mxnet"},{"code":null,"e":6241,"s":6067,"text":"Here, the network will receive the output of WordAttnNet ( X ), which will then go through the bidirectional GRU ( rnn ) and then through AttentionWithContext ( sent_attn )."},{"code":null,"e":6305,"s":6241,"text":"At this point, we have all the building blocks to code the HAN."},{"code":null,"e":6350,"s":6305,"text":"1.1.3 Hierarchical Attention Networks (HANs)"},{"code":null,"e":6358,"s":6350,"text":"Pytorch"},{"code":null,"e":6364,"s":6358,"text":"Mxnet"},{"code":null,"e":6671,"s":6364,"text":"I believe it might be useful here to illustrate the flow of the data through the network with some numbers related to the dimensions of tensors as they navigate the network. Let’s assume we use batch sizes ( bsz ) of 32, token embedding of dim ( embed_dim ) 100 and GRUs with hidden size ( hidden_dim ) 64."},{"code":null,"e":6973,"s":6671,"text":"The input to HierAttnNet in the snippet before X is a tensor of dim (bzs, maxlen_doc, maxlen_sent) where maxlen_doc and maxlen_sent are the maximum number of sentences per document and words per sentence. Let’s assume that these numbers are 5 and 20. Therefore, X is here a tensor of dim (32, 5, 20) ."},{"code":null,"e":7299,"s":6973,"text":"The first thing we do is to permute the axes, resulting in a tensor of dim (5, 32, 20) . This is because we are going to process one sentence at a time feeding the last hidden state of one sentence as the initial hidden state of the next sentence, in a “stateful” manner. This will happen within the loop in the forward pass."},{"code":null,"e":7961,"s":7299,"text":"In that loop, we are going to process one sentence at a time, i.e. a tensor of dim (32, 20) containing the i-th sentence for all 32 reviews in the batch. This tensor is then passed to wordattnnet , which is simply Word Encoder + Word Attention as described before. There, it will first go through the embeddings layer, resulting in a tensor of dim (32, 20, 100) . Then through the bidirectional GRU, resulting in a tensor of dim (32, 20, 128) and finally through the attention mechanism, resulting in a tensor of dim (32, 1, 128) . This last tensor is si in equation 7 in the Zichao Yang, et al paper, and corresponds to the i-th sentence vector representation."},{"code":null,"e":8544,"s":7961,"text":"After running the loop we will have maxlen_doc (i.e. 5) tensors of dim (32, 1, 128) that will be concatenated along the 2nd dimension, resulting in a tensor of dim (32, 5, 128) → (bsz, maxlen_doc, hidden_dim*2). This tensor is then passed through sentattnnet , which is simply Sentence Encoder + Sentence Attention as described before. There it will first go through the bidirectional GRU, resulting in a tensor of dim (32, 5, 128) and finally through the attention mechanism resulting in a tensor of dim (32, 128) . 
This last tensor will be the v in the equation 10 in their paper."},{"code":null,"e":8641,"s":8544,"text":"Finally, v is then passed through a fully connected layer and a Softmax function for prediction."},{"code":null,"e":8908,"s":8641,"text":"When I started to run experiments I noticed that the model overfitted quite early during training. The best validation loss and accuracy happened within the first couple of epochs, or even after the first epoch. When overfitting occurs there are a number of options:"},{"code":null,"e":9033,"s":8908,"text":"Reduce model complexity: I explore this by running a number of models with a small number of embeddings and/or hidden sizes."},{"code":null,"e":9097,"s":9033,"text":"Early Stopping: this is always used via an early_stop function."},{"code":null,"e":9269,"s":9097,"text":"Additional regularisation, such as dropout, label smoothing (Christian Szegedy et al, 2015) or data augmentation. I write “additional” because I already used weight decay."},{"code":null,"e":9523,"s":9269,"text":"I have not explored label smoothing or data augmentation in this exercise. If you want to dig a bit more into how to implement label smoothing in Pytorch, have a look at this repo. In the case of Mxnet, the gluonnlp API has its own LabelSmoothing class."},{"code":null,"e":10035,"s":9523,"text":"Regarding data augmentation, the truth is that I have not tried it here and perhaps I should. Not only because it normally leads to notable improvements in terms of model generalisation, but moreover because I already have most of the code from another experiment where I implemented EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks (Jason Wei and Kai Zou 2019). Nonetheless, one has to stop somewhere and I decided to focus on exploring different dropout mechanisms."},{"code":null,"e":10658,"s":10035,"text":"The 3 different forms of dropout I used here are: embedding dropout, locked dropout and weight dropout. The code that I used is taken directly from the salesforce repo corresponding to the implementation of the AWD-LTSM (Merity, hirish Keskar and Socher, 2017). In this section, I will focus on discussing the Pytorch implementation, but I will also include information regarding Mxnet’s implementation. Note that these dropout mechanisms were initially thought and implemented in the context of language models. However, there is no reason why they should not work here (or at least no reason why we should not try them)."},{"code":null,"e":10680,"s":10658,"text":"2.1 Embedding Dropout"},{"code":null,"e":11041,"s":10680,"text":"This is discussed in detail in Section 4.3. in the Merity et al paper and is based in the work of Gal & Ghahramani (2016). No one better than the authors themselves to explain it. In their own words: “This is equivalent to performing dropout on the embedding matrix at a word level, where the dropout is broadcast across all the word vector’s embedding. [...]”"},{"code":null,"e":11120,"s":11041,"text":"In code (the code below is a simplified version of that in the original repo):"},{"code":null,"e":11397,"s":11120,"text":"Basically, we create a mask of 0s and 1s along the 1st dimension of the embeddings tensor (the “word” dimension) and then we expand that mask along the second dimension (the “embedding” dimension), scaling the remaining weights accordingly. 
As the authors said, we drop words."},{"code":null,"e":11416,"s":11397,"text":"2.2 Locked Dropout"},{"code":null,"e":11696,"s":11416,"text":"This is also based on the work of Gal & Ghahramani (2016). Again in the words of the authors: “[...] sample a binary dropout mask only once upon the first call and then to repeatedly use that locked dropout mask for all repeated connections within the forward and backward pass”."},{"code":null,"e":11705,"s":11696,"text":"In code:"},{"code":null,"e":12122,"s":11705,"text":"Simply,LockedDropoutwill receive a 3-dim tensor, it will then generate a mask along the second dimension and expand that mask along the first dimension. For example, when applied to a tensor like (batch_size, seq_length, embed_dim), it will create a mask of dim (1, seq_length, embed_dim) and apply it to the whole batch. Mxnet’s nn.Dropout module has an axes parameter that directly implements this type of dropout."},{"code":null,"e":12137,"s":12122,"text":"And finally..."},{"code":null,"e":12157,"s":12137,"text":"2.3. Weight Dropout"},{"code":null,"e":12405,"s":12157,"text":"This is discussed in Section 2 in their paper. Once again, in their own words: “We propose the use of DropConnect (Wan et al., 2013) on the recurrent hidden to hidden weight matrices which do not require any modifications to an RNN’s formulation.”"},{"code":null,"e":12484,"s":12405,"text":"In code (the code below is a simplified version of that in the original repo):"},{"code":null,"e":12983,"s":12484,"text":"WeightDrop will first copy and register the hidden-to-hidden weights (or in general terms the weights in the List weights) with a suffix _raw (line 14). Then, it will apply dropout and assign the weights again to the module (line 25 if variationalor 27 otherwise). As shown in the snippet, the variational option does the same as discussed before in the case of Embedding Dropout, i.e. generates a mask along the first dimension of the tensor and expands (or broadcasts) along the second dimension."},{"code":null,"e":13331,"s":12983,"text":"There are a couple of drawbacks to this implementation. In the first place, given some input weights, the final model will contain the original weights (referred as weight_name_raw ) and those with dropout (refer as weight_name ), which is not very efficient. Secondly, it changes the name of the parameters, adding ‘ module’ to the original name."},{"code":null,"e":13981,"s":13331,"text":"To be honest, these are not major drawbacks at all, but I can use them as an excuse to introduce another two implementations that are perhaps a bit better (although of course based on the original one). One is the implementation within the great text API at the fastai library. I guess at this point everyone knows about this library, but if you don’t let me write a couple of lines here. I find this library excellent, not only for the high level APIs that it offers, or the clever defaults, but also because there are a lot of little gems hidden in the source code. If you are not familiar with the library, give it a go, there is no turning back."},{"code":null,"e":14283,"s":13981,"text":"Another nice implementation is the function apply_weight_drop at the Mxnet’s gluonnlp API, which I used here. In fact, in their implementation of the AWDRNN language model this function is used for both the embedding and the hidden-to-hidden weight dropout. 
It is available through their utils module:"},{"code":null,"e":14334,"s":14283,"text":"from gluonnlp.model.utils import apply_weight_drop"},{"code":null,"e":14407,"s":14334,"text":"As far as implementation goes, this is it. Time to run some experiments."},{"code":null,"e":14420,"s":14407,"text":"3.1. Results"},{"code":null,"e":15068,"s":14420,"text":"I eventually recorded 59 experiments (I ran a few more), 40 of them using the Pytorch implementation and 19 using Mxnet. Throughout the experiments I used different batch sizes, learning rates, embedding dimensions, GRU hidden sizes, dropout rates, learning rate schedulers, optimisers, etc. They are all shown in Tables 1 and 2 in the notebook 04_Review_Score_Prediction_Results.ipynb. The best results on the test dataset for each implementation are shown in the table below, along with the best result I obtained from previous attempts using tf-idf along with LightGBM and Hyperopt for the classification and hyper-parameter optimisation tasks."},{"code":null,"e":15507,"s":15068,"text":"In the first place, it is worth reiterating that I only run 19 experiments with the Mxnet implementation. This is in part due to the fact that, as I mentioned earlier in the post, I have more experience with Pytorch than with Mxnet and Gluon, which influenced the corresponding experimentation. Therefore, it is quite possible that I missed a minor tweak to the Mxnet models that would have lead to better results than those in the table."},{"code":null,"e":15885,"s":15507,"text":"Other than that we can see that the HAN-Pytorch model performs better than a thoroughly tuned tf-idf+LighGBM model on the test dataset for all, accuracy, F1 score and precision. Therefore, the next immediate question most will be asking is: is it worth using HAN over tf-idf+LightGBM (or your favourite classifier)? And the answer is, as with most things in life, “it depends”."},{"code":null,"e":16486,"s":15885,"text":"It is true that HANs perform better, but the increase is relatively small. In general, leaving aside the particular case of the Amazon reviews, if in your business a ~3% F1 score is important (i.e. leads to a sizeable increase in revenue, savings or some other benefits) then there is no question, one would use the DL approach. On top of that, attention mechanisms might give you some additional, useful information (such as the expressions within the text that lead to a certain classification) beyond just the keywords that one would obtain by using approaches such as tf-idf (or topic modelling)."},{"code":null,"e":16970,"s":16486,"text":"Finally, my implementation of HANs is inefficient (see next section). Even in that scenario, the results presented in the table are always obtained in less than 10 epochs and each epoch runs in around 3min (or less depending on the batch sizes) on a Tesla K80. Therefore, this is certainly not a computationally expensive algorithm to train and performs well. In summary, I’d say that HANs are a good algorithm to have in your repertoire when it comes to perform text classification."},{"code":null,"e":16996,"s":16970,"text":"3.2 Visualising Attention"},{"code":null,"e":17107,"s":16996,"text":"Let’s now have a look at the attention weights, in particular to the word and sentence importance weights (α)."},{"code":null,"e":17641,"s":17107,"text":"Figure 2 shows both word and sentence attention weights for two reviews that were classified correctly. 
The xxmaj token is a special token introduced by the fastai tokenizer to indicate that the next token starts with a capital letter. In addition, it is worth mentioning that in the original dataset review scores range from 1–5 stars. During preprocessing, I merge reviews with 1 and 2 starts into one class and re-label the classes to start from 0 (see here for details). Therefore, the final number of classes is 4: {0, 1, 2, 3}."},{"code":null,"e":17993,"s":17641,"text":"The figure shows how, when predicting the review score, the HAN places attention to phrases and constructions like “fit was perfect”, “very superior” or “rubs [...] wrong places”, as well as isolated words like “bought” or “not”. In addition, we can see that in the top plot, a bit more attention is placed in the 3rd sentence relative to the other 3."},{"code":null,"e":18536,"s":17993,"text":"Figure 3 shows both word and sentence attention weights for two reviews that were misclassified. The top review was predicted as 0 while the true score was 3 (real score in the original dataset is 5). Someone found those boots “yuck”, “disappointing” and “bad” yet gave them a 5 star score. The review at the bottom was predicted as 3 while the true score was 0 (real score in the original dataset is 1). It is easy to understand why the HAN misclassified this review mostly based on the first sentence, where it places the highest attention."},{"code":null,"e":18786,"s":18536,"text":"Nonetheless, the figures show that the attention mechanism works well, capturing the relevant pieces in the reviews that lead to a certain classification. Notebook 05_Visualizing_Attention.ipynb contains the code that I used to generate these plots."},{"code":null,"e":19243,"s":18786,"text":"At this stage, there are a few comments worth making. First of all, I ran all the experiments manually (with a bash file), which is not the best way of optimising the hyper-parameters of the model. Ideally, one would like to wrap up the train and validation processes in an objective function and use Hyperopt, as I did with all the other experiments in the repo that focus on text classification. I will include a .py script to do that in the near future."},{"code":null,"e":19672,"s":19243,"text":"On the other hand, looking at figures 2 and 3 one can see that attention is normally focused on isolated words or constructions and phrases or 2 or 3 words. Therefore, one might think that using a non-DL approach along with n-grams might improve the results in the table. I actually did that in this notebook and the difference between using or not using n-grams (in particular bigrams via gensim.models.phrases ) is negligible."},{"code":null,"e":20207,"s":19672,"text":"Other issues worth discussing are related to model generalisation and efficiency. For example, I already mentioned that one could use label smoothing and data augmentation to add regularisation. In fact, even after adding some dropout, the best validation loss and metrics are still obtained early during training, moreover in the case of the Mxnet implementation. This is not necessarily bad and might simply reflect the fact that the model reaches its best performance just after a few epochs. However, more exploration is required."},{"code":null,"e":21009,"s":20207,"text":"In addition, if you have a look at the details of my implementation, you will realise that the input tensors have a lot of unnecessary padding. Nothing will be learned from this padding but still has to be processed, i.e. this is inefficient for the GPU. 
To remedy this situation, one could group reviews of similar lengths into buckets and pad accordingly, reducing the computation required to process the documents. Furthermore, one could adjust both learning rate and batch size according to the document length. All these approaches have already been used to build language models (e.g see this presentation) and are readily available at the gluonnlp API. At this point, I have only scratched the surface of what this API can do and I am looking forward to more experimentation in the near future."},{"code":null,"e":21486,"s":21009,"text":"I have implemented “Hierarchical Attention Networks for Document Classification” (Zichao Yang, et al, 2016) using Pytorch and Mxnet to predict Amazon reviews scores, and compared the results with those of previous implementations that did not involve Deep Learning. HANs perform better across all the evaluation metrics, are relatively easy to implement and fast to train. Therefore, I believe this is an algorithm worth having in the repertoire for text classification tasks."},{"code":null,"e":21553,"s":21486,"text":"Other than that, and as always, I hope you found this post useful."},{"code":null,"e":21661,"s":21553,"text":"Any comments, suggestions, please email me at: jrzaurin@gmail.com or even better open an issue in the repo."},{"code":null,"e":21672,"s":21661,"text":"References"},{"code":null,"e":21828,"s":21672,"text":"Dzmitry Bahdanau, KyungHyun Cho, Yoshua Bengio 2016. neural machine translation by jointly learning to align and translate. https://arxiv.org/abs/1409.0473"},{"code":null,"e":21975,"s":21828,"text":"Yarin Gal, Zoubin Ghahramani 2015. A Theoretically Grounded Application of Dropout in Recurrent Neural Networks. https://arxiv.org/abs/1512.05287."},{"code":null,"e":22144,"s":21975,"text":"Ruining He, Julian McAuley 2016. Ups and Downs: Modeling the Visual Evolution of Fashion Trends with One-Class Collaborative Filtering. https://arxiv.org/abs/1602.01585"},{"code":null,"e":22326,"s":22144,"text":"Julian McAuley , Christopher Targett , Qinfeng (‘Javen’) Shi , and Anton van den Hengel 2015. Image-based Recommendations on Styles and Substitutes. https://arxiv.org/abs/1506.04757"},{"code":null,"e":22469,"s":22326,"text":"Stephen Merity, Nitish Shirish Keskar, Richard Socher 2017. Regularizing and Optimizing LSTM Language Models. https://arxiv.org/abs/1708.02182"},{"code":null,"e":22651,"s":22469,"text":"Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna 2015. Rethinking the Inception Architecture for Computer Vision. https://arxiv.org/abs/1512.00567"},{"code":null,"e":22814,"s":22651,"text":"Li Wan, Matthew Zeiler, Sixin Zhang, Yann LeCun, Rob Fergus 2013. Regularization of Neural Networks using DropConnect. http://proceedings.mlr.press/v28/wan13.html"},{"code":null,"e":22966,"s":22814,"text":"Jason Wei, Kai Zou 2019. EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks. https://arxiv.org/abs/1901.11196"}],"string":"[\n {\n \"code\": null,\n \"e\": 843,\n \"s\": 171,\n \"text\": \"This post and the code here are part of a larger repo that I have (very creatively) called “NLP-stuff”. As the name indicates, I include in that repo projects that I do and/or ideas that I have — as long as there is code associated with those ideas — that are related to NLP. In every directory, I have included a README file and a series of explanatory notebooks that I hope help explaining the code. 
I intend to keep adding projects throughout 2020, not necessarily the latest and/or most popular releases, but simply papers or algorithms I find interesting and useful. In particular, the code related to this post is in the directory amazon_reviews_classification_HAN.\"\n },\n {\n \"code\": null,\n \"e\": 1571,\n \"s\": 843,\n \"text\": \"First things first, let’s start by acknowledging the relevant people that did the hard work. This post and the companion repo are based on the paper “Hierarchical Attention Networks for Document Classification” (Zichao Yang, et al, 2016). In addition, I have also used in my implementation the results, and code, presented in “Regularizing and Optimizing LSTM Language Models” (Stephen Merity, Nitish Shirish Keskar and Richard Socher, 2017). The dataset that I have used for this and other experiments in the repo is the Amazon product data (J. McAuley et al., 2015 and R. He, J. McAuley 2016), in particular the Clothing, Shoes and Jewellery dataset. I strongly recommend having a look at these papers and references therein.\"\n },\n {\n \"code\": null,\n \"e\": 1736,\n \"s\": 1571,\n \"text\": \"Once that is done let’s start by describing the network architecture we will be implementing here. The following figure is Figure 2 in the Zichao Yang et al, paper.\"\n },\n {\n \"code\": null,\n \"e\": 2543,\n \"s\": 1736,\n \"text\": \"We consider a document comprised of L sentences si and each sentence contains Ti words. w_it with t ∈ [1, T], represents the words in the i-th sentence. As shown in the figure, the authors used a word encoder (a bidirectional GRU, Bahdanau et al., 2014), along with a word attention mechanism to encode each sentence into a vector representation. These sentence representations are passed through a sentence encoder with a sentence attention mechanism resulting in a document vector representation. This final representation is passed to a fully connected layer with the corresponding activation function for prediction. The word “hierarchical” refers here to the process of encoding first sentences from words, and then documents from sentences, naturally following the “semantic hierarchy” in a document.\"\n },\n {\n \"code\": null,\n \"e\": 2571,\n \"s\": 2543,\n \"text\": \"1.1 The Attention Mechanism\"\n },\n {\n \"code\": null,\n \"e\": 3067,\n \"s\": 2571,\n \"text\": \"Assuming one is familiar with the GRU formulation (if not have a look here), all the math one needs to understand the attention mechanism is included below. The mathematical expressions I include here refer to the word attention mechanism. The sentence attention mechanism is identical, but at sentence level. Therefore, I believe explaining the following expressions, along with the code snippets below, will be enough to understand the full process. The first 3 expression are pretty standard:\"\n },\n {\n \"code\": null,\n \"e\": 3322,\n \"s\": 3067,\n \"text\": \"Where x_it is the word embedding vector of word t in sentence i. The vectors h_it are the forward and backward output features from the bidirectional GRU, which are concatenated before applying attention. The attention mechanism is formulated as follows:\"\n },\n {\n \"code\": null,\n \"e\": 3929,\n \"s\": 3322,\n \"text\": \"First, the h_it features go through a one-layer MLP with a hyperbolic tangent function. This results in a hidden representation of h_it, u_it. 
Then, the importance of each word is measured as the dot product between u_it and a context vector u_w, obtaining a so-called normalised importance weight α_it. After that, the sentence vector si is computed as the weighted sum of the h_it features based on the normalised importance weights. For more details, please, read the paper, section 2.2 “Hierarchical Attention”. As mentioned earlier, the sentence attention mechanism is identical but at sentence level.\"\n },\n {\n \"code\": null,\n \"e\": 3974,\n \"s\": 3929,\n \"text\": \"Word and sentence attention can be coded as:\"\n },\n {\n \"code\": null,\n \"e\": 3983,\n \"s\": 3974,\n \"text\": \"Pytorch:\"\n },\n {\n \"code\": null,\n \"e\": 3990,\n \"s\": 3983,\n \"text\": \"Mxnet:\"\n },\n {\n \"code\": null,\n \"e\": 4069,\n \"s\": 3990,\n \"text\": \"where inp refers to h_it and h_i for word and sentence attention respectively.\"\n },\n {\n \"code\": null,\n \"e\": 4614,\n \"s\": 4069,\n \"text\": \"As one can see, the Mxnet implementation is nearly identical to that in Pytorch, albeit with some subtle differences. This is going to be the case throughout the whole HAN implementation. However, I would like to add a few lines to clarify the following: this is my second “serious” dive into Mxnet and Gluon. The more I use it, the more I like it, but I am pretty sure that I could have written a better, more efficient code. With that in mind, if you, the reader, are a Mxnet user and have suggestions and comments, I would love to hear them.\"\n },\n {\n \"code\": null,\n \"e\": 4650,\n \"s\": 4614,\n \"text\": \"1.1.1 Word Encoder + Word Attention\"\n },\n {\n \"code\": null,\n \"e\": 4929,\n \"s\": 4650,\n \"text\": \"Once we have the AttentionWithContext class, coding WordAttnNet (Word Encoder + Word Attention) is straightforward. The snippet below is a simplified version of that in the repo, but contains the main components. For the full version, please have a look at the code in the repo.\"\n },\n {\n \"code\": null,\n \"e\": 4937,\n \"s\": 4929,\n \"text\": \"Pytorch\"\n },\n {\n \"code\": null,\n \"e\": 4943,\n \"s\": 4937,\n \"text\": \"Mxnet\"\n },\n {\n \"code\": null,\n \"e\": 5184,\n \"s\": 4943,\n \"text\": \"You will notice the presence of 3 dropout related parameters: embed_drop , weight_drop and locked_drop . I will describe them in detail in Section 2. For the time being, let’s ignore them and focus on the remaining components of the module.\"\n },\n {\n \"code\": null,\n \"e\": 5513,\n \"s\": 5184,\n \"text\": \"Simply, the input tokens ( X ) go through the embeddings lookup table ( word_embed). The resulting token embeddings go through the bidirectional GRU ( rnn) and the output of the GRU goes to AttentionWithContext ( word_attn ) which will return the importance weights (α), the sentence representation (s) and the hidden state h_n.\"\n },\n {\n \"code\": null,\n \"e\": 5859,\n \"s\": 5513,\n \"text\": \"Note that returning the hidden state is necessary since the document (the amazon review here) is comprised of a series of sentences. Therefore, the initial hidden state of sentence i+1 will be the last hidden state of sentence i. We could say that we will treat the documents themselves as “stateful”. 
I will come back to this later in the post.\"\n },\n {\n \"code\": null,\n \"e\": 5903,\n \"s\": 5859,\n \"text\": \"1.1.2 Sentence Encoder + Sentence Attention\"\n },\n {\n \"code\": null,\n \"e\": 6053,\n \"s\": 5903,\n \"text\": \"Given the fact that we do not need an embedding lookup table for the sentence encoder, SentAttnNet (Sentence Encoder + Sentence Attention) is simply:\"\n },\n {\n \"code\": null,\n \"e\": 6061,\n \"s\": 6053,\n \"text\": \"Pytorch\"\n },\n {\n \"code\": null,\n \"e\": 6067,\n \"s\": 6061,\n \"text\": \"Mxnet\"\n },\n {\n \"code\": null,\n \"e\": 6241,\n \"s\": 6067,\n \"text\": \"Here, the network will receive the output of WordAttnNet ( X ), which will then go through the bidirectional GRU ( rnn ) and then through AttentionWithContext ( sent_attn ).\"\n },\n {\n \"code\": null,\n \"e\": 6305,\n \"s\": 6241,\n \"text\": \"At this point, we have all the building blocks to code the HAN.\"\n },\n {\n \"code\": null,\n \"e\": 6350,\n \"s\": 6305,\n \"text\": \"1.1.3 Hierarchical Attention Networks (HANs)\"\n },\n {\n \"code\": null,\n \"e\": 6358,\n \"s\": 6350,\n \"text\": \"Pytorch\"\n },\n {\n \"code\": null,\n \"e\": 6364,\n \"s\": 6358,\n \"text\": \"Mxnet\"\n },\n {\n \"code\": null,\n \"e\": 6671,\n \"s\": 6364,\n \"text\": \"I believe it might be useful here to illustrate the flow of the data through the network with some numbers related to the dimensions of tensors as they navigate the network. Let’s assume we use batch sizes ( bsz ) of 32, token embedding of dim ( embed_dim ) 100 and GRUs with hidden size ( hidden_dim ) 64.\"\n },\n {\n \"code\": null,\n \"e\": 6973,\n \"s\": 6671,\n \"text\": \"The input to HierAttnNet in the snippet before X is a tensor of dim (bzs, maxlen_doc, maxlen_sent) where maxlen_doc and maxlen_sent are the maximum number of sentences per document and words per sentence. Let’s assume that these numbers are 5 and 20. Therefore, X is here a tensor of dim (32, 5, 20) .\"\n },\n {\n \"code\": null,\n \"e\": 7299,\n \"s\": 6973,\n \"text\": \"The first thing we do is to permute the axes, resulting in a tensor of dim (5, 32, 20) . This is because we are going to process one sentence at a time feeding the last hidden state of one sentence as the initial hidden state of the next sentence, in a “stateful” manner. This will happen within the loop in the forward pass.\"\n },\n {\n \"code\": null,\n \"e\": 7961,\n \"s\": 7299,\n \"text\": \"In that loop, we are going to process one sentence at a time, i.e. a tensor of dim (32, 20) containing the i-th sentence for all 32 reviews in the batch. This tensor is then passed to wordattnnet , which is simply Word Encoder + Word Attention as described before. There, it will first go through the embeddings layer, resulting in a tensor of dim (32, 20, 100) . Then through the bidirectional GRU, resulting in a tensor of dim (32, 20, 128) and finally through the attention mechanism, resulting in a tensor of dim (32, 1, 128) . This last tensor is si in equation 7 in the Zichao Yang, et al paper, and corresponds to the i-th sentence vector representation.\"\n },\n {\n \"code\": null,\n \"e\": 8544,\n \"s\": 7961,\n \"text\": \"After running the loop we will have maxlen_doc (i.e. 5) tensors of dim (32, 1, 128) that will be concatenated along the 2nd dimension, resulting in a tensor of dim (32, 5, 128) → (bsz, maxlen_doc, hidden_dim*2). This tensor is then passed through sentattnnet , which is simply Sentence Encoder + Sentence Attention as described before. 
There it will first go through the bidirectional GRU, resulting in a tensor of dim (32, 5, 128) and finally through the attention mechanism resulting in a tensor of dim (32, 128) . This last tensor will be the v in the equation 10 in their paper.\"\n },\n {\n \"code\": null,\n \"e\": 8641,\n \"s\": 8544,\n \"text\": \"Finally, v is then passed through a fully connected layer and a Softmax function for prediction.\"\n },\n {\n \"code\": null,\n \"e\": 8908,\n \"s\": 8641,\n \"text\": \"When I started to run experiments I noticed that the model overfitted quite early during training. The best validation loss and accuracy happened within the first couple of epochs, or even after the first epoch. When overfitting occurs there are a number of options:\"\n },\n {\n \"code\": null,\n \"e\": 9033,\n \"s\": 8908,\n \"text\": \"Reduce model complexity: I explore this by running a number of models with a small number of embeddings and/or hidden sizes.\"\n },\n {\n \"code\": null,\n \"e\": 9097,\n \"s\": 9033,\n \"text\": \"Early Stopping: this is always used via an early_stop function.\"\n },\n {\n \"code\": null,\n \"e\": 9269,\n \"s\": 9097,\n \"text\": \"Additional regularisation, such as dropout, label smoothing (Christian Szegedy et al, 2015) or data augmentation. I write “additional” because I already used weight decay.\"\n },\n {\n \"code\": null,\n \"e\": 9523,\n \"s\": 9269,\n \"text\": \"I have not explored label smoothing or data augmentation in this exercise. If you want to dig a bit more into how to implement label smoothing in Pytorch, have a look at this repo. In the case of Mxnet, the gluonnlp API has its own LabelSmoothing class.\"\n },\n {\n \"code\": null,\n \"e\": 10035,\n \"s\": 9523,\n \"text\": \"Regarding data augmentation, the truth is that I have not tried it here and perhaps I should. Not only because it normally leads to notable improvements in terms of model generalisation, but moreover because I already have most of the code from another experiment where I implemented EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks (Jason Wei and Kai Zou 2019). Nonetheless, one has to stop somewhere and I decided to focus on exploring different dropout mechanisms.\"\n },\n {\n \"code\": null,\n \"e\": 10658,\n \"s\": 10035,\n \"text\": \"The 3 different forms of dropout I used here are: embedding dropout, locked dropout and weight dropout. The code that I used is taken directly from the salesforce repo corresponding to the implementation of the AWD-LTSM (Merity, hirish Keskar and Socher, 2017). In this section, I will focus on discussing the Pytorch implementation, but I will also include information regarding Mxnet’s implementation. Note that these dropout mechanisms were initially thought and implemented in the context of language models. However, there is no reason why they should not work here (or at least no reason why we should not try them).\"\n },\n {\n \"code\": null,\n \"e\": 10680,\n \"s\": 10658,\n \"text\": \"2.1 Embedding Dropout\"\n },\n {\n \"code\": null,\n \"e\": 11041,\n \"s\": 10680,\n \"text\": \"This is discussed in detail in Section 4.3. in the Merity et al paper and is based in the work of Gal & Ghahramani (2016). No one better than the authors themselves to explain it. In their own words: “This is equivalent to performing dropout on the embedding matrix at a word level, where the dropout is broadcast across all the word vector’s embedding. 
[...]”\"\n },\n {\n \"code\": null,\n \"e\": 11120,\n \"s\": 11041,\n \"text\": \"In code (the code below is a simplified version of that in the original repo):\"\n },\n {\n \"code\": null,\n \"e\": 11397,\n \"s\": 11120,\n \"text\": \"Basically, we create a mask of 0s and 1s along the 1st dimension of the embeddings tensor (the “word” dimension) and then we expand that mask along the second dimension (the “embedding” dimension), scaling the remaining weights accordingly. As the authors said, we drop words.\"\n },\n {\n \"code\": null,\n \"e\": 11416,\n \"s\": 11397,\n \"text\": \"2.2 Locked Dropout\"\n },\n {\n \"code\": null,\n \"e\": 11696,\n \"s\": 11416,\n \"text\": \"This is also based on the work of Gal & Ghahramani (2016). Again in the words of the authors: “[...] sample a binary dropout mask only once upon the first call and then to repeatedly use that locked dropout mask for all repeated connections within the forward and backward pass”.\"\n },\n {\n \"code\": null,\n \"e\": 11705,\n \"s\": 11696,\n \"text\": \"In code:\"\n },\n {\n \"code\": null,\n \"e\": 12122,\n \"s\": 11705,\n \"text\": \"Simply,LockedDropoutwill receive a 3-dim tensor, it will then generate a mask along the second dimension and expand that mask along the first dimension. For example, when applied to a tensor like (batch_size, seq_length, embed_dim), it will create a mask of dim (1, seq_length, embed_dim) and apply it to the whole batch. Mxnet’s nn.Dropout module has an axes parameter that directly implements this type of dropout.\"\n },\n {\n \"code\": null,\n \"e\": 12137,\n \"s\": 12122,\n \"text\": \"And finally...\"\n },\n {\n \"code\": null,\n \"e\": 12157,\n \"s\": 12137,\n \"text\": \"2.3. Weight Dropout\"\n },\n {\n \"code\": null,\n \"e\": 12405,\n \"s\": 12157,\n \"text\": \"This is discussed in Section 2 in their paper. Once again, in their own words: “We propose the use of DropConnect (Wan et al., 2013) on the recurrent hidden to hidden weight matrices which do not require any modifications to an RNN’s formulation.”\"\n },\n {\n \"code\": null,\n \"e\": 12484,\n \"s\": 12405,\n \"text\": \"In code (the code below is a simplified version of that in the original repo):\"\n },\n {\n \"code\": null,\n \"e\": 12983,\n \"s\": 12484,\n \"text\": \"WeightDrop will first copy and register the hidden-to-hidden weights (or in general terms the weights in the List weights) with a suffix _raw (line 14). Then, it will apply dropout and assign the weights again to the module (line 25 if variationalor 27 otherwise). As shown in the snippet, the variational option does the same as discussed before in the case of Embedding Dropout, i.e. generates a mask along the first dimension of the tensor and expands (or broadcasts) along the second dimension.\"\n },\n {\n \"code\": null,\n \"e\": 13331,\n \"s\": 12983,\n \"text\": \"There are a couple of drawbacks to this implementation. In the first place, given some input weights, the final model will contain the original weights (referred as weight_name_raw ) and those with dropout (refer as weight_name ), which is not very efficient. Secondly, it changes the name of the parameters, adding ‘ module’ to the original name.\"\n },\n {\n \"code\": null,\n \"e\": 13981,\n \"s\": 13331,\n \"text\": \"To be honest, these are not major drawbacks at all, but I can use them as an excuse to introduce another two implementations that are perhaps a bit better (although of course based on the original one). One is the implementation within the great text API at the fastai library. 
I guess at this point everyone knows about this library, but if you don’t let me write a couple of lines here. I find this library excellent, not only for the high level APIs that it offers, or the clever defaults, but also because there are a lot of little gems hidden in the source code. If you are not familiar with the library, give it a go, there is no turning back.\"\n },\n {\n \"code\": null,\n \"e\": 14283,\n \"s\": 13981,\n \"text\": \"Another nice implementation is the function apply_weight_drop at the Mxnet’s gluonnlp API, which I used here. In fact, in their implementation of the AWDRNN language model this function is used for both the embedding and the hidden-to-hidden weight dropout. It is available through their utils module:\"\n },\n {\n \"code\": null,\n \"e\": 14334,\n \"s\": 14283,\n \"text\": \"from gluonnlp.model.utils import apply_weight_drop\"\n },\n {\n \"code\": null,\n \"e\": 14407,\n \"s\": 14334,\n \"text\": \"As far as implementation goes, this is it. Time to run some experiments.\"\n },\n {\n \"code\": null,\n \"e\": 14420,\n \"s\": 14407,\n \"text\": \"3.1. Results\"\n },\n {\n \"code\": null,\n \"e\": 15068,\n \"s\": 14420,\n \"text\": \"I eventually recorded 59 experiments (I ran a few more), 40 of them using the Pytorch implementation and 19 using Mxnet. Throughout the experiments I used different batch sizes, learning rates, embedding dimensions, GRU hidden sizes, dropout rates, learning rate schedulers, optimisers, etc. They are all shown in Tables 1 and 2 in the notebook 04_Review_Score_Prediction_Results.ipynb. The best results on the test dataset for each implementation are shown in the table below, along with the best result I obtained from previous attempts using tf-idf along with LightGBM and Hyperopt for the classification and hyper-parameter optimisation tasks.\"\n },\n {\n \"code\": null,\n \"e\": 15507,\n \"s\": 15068,\n \"text\": \"In the first place, it is worth reiterating that I only run 19 experiments with the Mxnet implementation. This is in part due to the fact that, as I mentioned earlier in the post, I have more experience with Pytorch than with Mxnet and Gluon, which influenced the corresponding experimentation. Therefore, it is quite possible that I missed a minor tweak to the Mxnet models that would have lead to better results than those in the table.\"\n },\n {\n \"code\": null,\n \"e\": 15885,\n \"s\": 15507,\n \"text\": \"Other than that we can see that the HAN-Pytorch model performs better than a thoroughly tuned tf-idf+LighGBM model on the test dataset for all, accuracy, F1 score and precision. Therefore, the next immediate question most will be asking is: is it worth using HAN over tf-idf+LightGBM (or your favourite classifier)? And the answer is, as with most things in life, “it depends”.\"\n },\n {\n \"code\": null,\n \"e\": 16486,\n \"s\": 15885,\n \"text\": \"It is true that HANs perform better, but the increase is relatively small. In general, leaving aside the particular case of the Amazon reviews, if in your business a ~3% F1 score is important (i.e. leads to a sizeable increase in revenue, savings or some other benefits) then there is no question, one would use the DL approach. 
On top of that, attention mechanisms might give you some additional, useful information (such as the expressions within the text that lead to a certain classification) beyond just the keywords that one would obtain by using approaches such as tf-idf (or topic modelling).\"\n },\n {\n \"code\": null,\n \"e\": 16970,\n \"s\": 16486,\n \"text\": \"Finally, my implementation of HANs is inefficient (see next section). Even in that scenario, the results presented in the table are always obtained in less than 10 epochs and each epoch runs in around 3min (or less depending on the batch sizes) on a Tesla K80. Therefore, this is certainly not a computationally expensive algorithm to train and performs well. In summary, I’d say that HANs are a good algorithm to have in your repertoire when it comes to perform text classification.\"\n },\n {\n \"code\": null,\n \"e\": 16996,\n \"s\": 16970,\n \"text\": \"3.2 Visualising Attention\"\n },\n {\n \"code\": null,\n \"e\": 17107,\n \"s\": 16996,\n \"text\": \"Let’s now have a look at the attention weights, in particular to the word and sentence importance weights (α).\"\n },\n {\n \"code\": null,\n \"e\": 17641,\n \"s\": 17107,\n \"text\": \"Figure 2 shows both word and sentence attention weights for two reviews that were classified correctly. The xxmaj token is a special token introduced by the fastai tokenizer to indicate that the next token starts with a capital letter. In addition, it is worth mentioning that in the original dataset review scores range from 1–5 stars. During preprocessing, I merge reviews with 1 and 2 starts into one class and re-label the classes to start from 0 (see here for details). Therefore, the final number of classes is 4: {0, 1, 2, 3}.\"\n },\n {\n \"code\": null,\n \"e\": 17993,\n \"s\": 17641,\n \"text\": \"The figure shows how, when predicting the review score, the HAN places attention to phrases and constructions like “fit was perfect”, “very superior” or “rubs [...] wrong places”, as well as isolated words like “bought” or “not”. In addition, we can see that in the top plot, a bit more attention is placed in the 3rd sentence relative to the other 3.\"\n },\n {\n \"code\": null,\n \"e\": 18536,\n \"s\": 17993,\n \"text\": \"Figure 3 shows both word and sentence attention weights for two reviews that were misclassified. The top review was predicted as 0 while the true score was 3 (real score in the original dataset is 5). Someone found those boots “yuck”, “disappointing” and “bad” yet gave them a 5 star score. The review at the bottom was predicted as 3 while the true score was 0 (real score in the original dataset is 1). It is easy to understand why the HAN misclassified this review mostly based on the first sentence, where it places the highest attention.\"\n },\n {\n \"code\": null,\n \"e\": 18786,\n \"s\": 18536,\n \"text\": \"Nonetheless, the figures show that the attention mechanism works well, capturing the relevant pieces in the reviews that lead to a certain classification. Notebook 05_Visualizing_Attention.ipynb contains the code that I used to generate these plots.\"\n },\n {\n \"code\": null,\n \"e\": 19243,\n \"s\": 18786,\n \"text\": \"At this stage, there are a few comments worth making. First of all, I ran all the experiments manually (with a bash file), which is not the best way of optimising the hyper-parameters of the model. 
On the other hand, looking at figures 2 and 3, one can see that attention is normally focused on isolated words or on constructions and phrases of 2 or 3 words. Therefore, one might think that using a non-DL approach along with n-grams might improve the results in the table. I actually did that in this notebook, and the difference between using or not using n-grams (in particular bigrams via gensim.models.phrases) is negligible.

Other issues worth discussing are related to model generalisation and efficiency. For example, I already mentioned that one could use label smoothing and data augmentation to add regularisation. In fact, even after adding some dropout, the best validation loss and metrics are still obtained early during training, especially in the case of the Mxnet implementation. This is not necessarily bad and might simply reflect the fact that the model reaches its best performance after just a few epochs. However, more exploration is required.

In addition, if you have a look at the details of my implementation, you will realise that the input tensors have a lot of unnecessary padding. Nothing will be learned from this padding, yet it still has to be processed, i.e. this is inefficient for the GPU. To remedy this situation, one could group reviews of similar lengths into buckets and pad accordingly, reducing the computation required to process the documents. Furthermore, one could adjust both the learning rate and the batch size according to the document length. All these approaches have already been used to build language models (e.g. see this presentation) and are readily available in the gluonnlp API. At this point, I have only scratched the surface of what this API can do, and I am looking forward to more experimentation in the near future.
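To make the bucketing idea concrete, here is a minimal sketch of my own (not code from the repo), grouping documents into length buckets so that each batch is only padded to its bucket's maximum:

from collections import defaultdict

def bucket_by_length(docs, bucket_size=10):
    # Documents whose lengths fall within the same bucket_size window
    # end up in the same bucket.
    buckets = defaultdict(list)
    for doc in docs:
        buckets[len(doc) // bucket_size].append(doc)
    return list(buckets.values())

def pad_bucket(bucket, pad_token=0):
    # Pad only up to the longest document in this bucket.
    longest = max(len(doc) for doc in bucket)
    return [doc + [pad_token] * (longest - len(doc)) for doc in bucket]

docs = [[1, 2], [3, 4, 5], [6] * 25]
for bucket in bucket_by_length(docs):
    print(pad_bucket(bucket))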
I have implemented "Hierarchical Attention Networks for Document Classification" (Zichao Yang et al., 2016) using Pytorch and Mxnet to predict Amazon review scores, and compared the results with those of previous implementations that did not involve Deep Learning. HANs perform better across all the evaluation metrics, are relatively easy to implement and are fast to train. Therefore, I believe this is an algorithm worth having in the repertoire for text classification tasks.

Other than that, and as always, I hope you found this post useful.

For any comments or suggestions, please email me at jrzaurin@gmail.com or, even better, open an issue in the repo.

References

Dzmitry Bahdanau, KyungHyun Cho, Yoshua Bengio 2016. Neural Machine Translation by Jointly Learning to Align and Translate. https://arxiv.org/abs/1409.0473

Yarin Gal, Zoubin Ghahramani 2015. A Theoretically Grounded Application of Dropout in Recurrent Neural Networks. https://arxiv.org/abs/1512.05287

Ruining He, Julian McAuley 2016. Ups and Downs: Modeling the Visual Evolution of Fashion Trends with One-Class Collaborative Filtering. https://arxiv.org/abs/1602.01585

Julian McAuley, Christopher Targett, Qinfeng ('Javen') Shi, and Anton van den Hengel 2015. Image-based Recommendations on Styles and Substitutes. https://arxiv.org/abs/1506.04757

Stephen Merity, Nitish Shirish Keskar, Richard Socher 2017. Regularizing and Optimizing LSTM Language Models. https://arxiv.org/abs/1708.02182

Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna 2015. Rethinking the Inception Architecture for Computer Vision. https://arxiv.org/abs/1512.00567

Li Wan, Matthew Zeiler, Sixin Zhang, Yann LeCun, Rob Fergus 2013. Regularization of Neural Networks using DropConnect. http://proceedings.mlr.press/v28/wan13.html

Jason Wei, Kai Zou 2019. EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks. https://arxiv.org/abs/1901.11196

Reading and Writing to text files in Python

Like other languages, Python provides some inbuilt functions for reading, writing, or accessing files. Python can handle mainly two types of files: the normal text file and the binary file.

For text files, each line is terminated with a special character '\n' (known as EOL, or End Of Line). For binary files, there is no line-ending character; the data is saved after converting the content into a bit stream.

In this section we will discuss text files.

r
It is Read Only mode. It opens the text file for reading. When the file is not present, it raises an I/O Error.

r+
This mode is for Reading and Writing. When the file is not present, it raises an I/O Error.

w
It is for write-only jobs. When the file is not present, it creates the file first and then starts writing; when the file is present, it removes the contents of that file and starts writing from the beginning.

w+
It is Write and Read mode. When the file is not present, it creates the file; when the file is present, the existing data is overwritten.

a
This is append mode, so it writes data at the end of the file.

a+
Append and Read mode. It can append data as well as read the data.

Now see how a file can be written using the writelines() and write() methods.

#Create an empty file and write some lines
line1 = 'This is first line. \n'
lines = ['This is another line to store into file.\n',
    'The Third Line for the file.\n',
    'Another line... !@#$%^&*()_+.\n',
    'End Line']
#open the file as write mode
my_file = open('file_read_write.txt', 'w')
my_file.write(line1)
my_file.writelines(lines) #Write multiple lines
my_file.close()
print('Writing Complete')

Writing Complete

After writing the lines, we are appending some lines to the file.

#program to append some lines
line1 = '\n\nThis is a new line. This line will be appended. \n'
#open the file as append mode
my_file = open('file_read_write.txt', 'a')
my_file.write(line1)
my_file.close()
print('Appending Done')
Appending Done

At last, we will see how to read the file content using the read() and readline() methods. We can provide an integer 'n' to get only the first 'n' characters.

#program to read from file
#open the file as read mode
my_file = open('file_read_write.txt', 'r')
print('Show the full content:')
print(my_file.read())
#Show first two lines
my_file.seek(0)
print('First two lines:')
print(my_file.readline(), end = '')
print(my_file.readline(), end = '')
#Show upto 25 characters
my_file.seek(0)
print('\n\nFirst 25 characters:')
print(my_file.read(25), end = '')
my_file.close()

Show the full content:
This is first line. 
This is another line to store into file.
The Third Line for the file.
Another line... !@#$%^&*()_+.
End Line

This is a new line. This line will be appended. 

First two lines:
This is first line. 
This is another line to store into file.

First 25 characters:
This is first line. 
This
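As a side note (an addition of mine, not part of the original tutorial): the same operations are usually written with the with statement, which closes the file automatically even if an error occurs:

#read using a context manager; no explicit close() is needed
with open('file_read_write.txt', 'r') as my_file:
    print(my_file.read(25))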
Set.clear() function in JavaScript

The clear() function of the Set object removes all elements from the current Set object.

Its syntax is as follows —

setObj.clear()

<html>
<head>
   <title>JavaScript Example</title>
</head>
<body>
   <script>
      // NB: the script body did not survive extraction; this is a minimal
      // reconstruction consistent with the output shown below.
      const setObj = new Set([4, 9, 16]);
      setObj.clear();
      document.write("Contents of the Set: ");
      for (const element of setObj) {
         document.write(element + " ");
      }
   </script>
</body>
</html>

Contents of the Set:

Nothing is printed after the colon because the Set has just been cleared.

How to Eliminate Duplicate User Defined Objects as a Key from Java LinkedHashMap? - GeeksforGeeks

04 Jan, 2021

Duplicate user-defined objects used as keys in a Java LinkedHashMap can be eliminated by implementing the equals and hashCode methods in the user-defined class.

Example:

Input : LinkedHashMap = [{[Apple, 40], Kashmir}, {[Grapes, 80], Nashik}]
        Duplicate key = {[Grapes, 80], Delhi}
Output: LinkedHashMap = [{[Apple, 40], Kashmir}, {[Grapes, 80], Delhi}]

Syntax:

equals() Method:

public boolean equals (Object obj)

// This method checks if some other Object
// passed to it as an argument is equal to
// the Object on which it is invoked.

hashCode() Method:

public int hashCode()

// This method returns the hash code value
// for the object on which this method is invoked.

Below is the implementation of the problem statement:
// Java Program to eliminate duplicate user defined
// objects as a key from Java LinkedHashMap
import java.util.*;

class Employee {
    private String name;
    private int id;

    // Constructor
    public Employee(String name, int id)
    {
        this.name = name;
        this.id = id;
    }

    // HashCode Method
    // (a constant hash code puts every key in the same bucket, so
    // equals() runs on each collision and the calls show up in the output)
    public int hashCode()
    {
        System.out.println("In hashcode method");
        int hashcode = 0;
        return hashcode;
    }

    // Equals Method
    public boolean equals(Object obj)
    {
        System.out.println("In equals method");
        if (obj instanceof Employee) {
            Employee emp = (Employee)obj;
            return (emp.name.equals(this.name) && emp.id == this.id);
        }
        else {
            return false;
        }
    }

    // Getters and Setters
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public int getId() { return id; }
    public void setId(int id) { this.id = id; }

    public String toString()
    {
        return "Employee Id: " + id + " Name: " + name;
    }
}

// Driver code
public class Duplicate_Value {
    public static void main(String a[])
    {
        // LinkedHashMap initialization
        LinkedHashMap<Employee, Integer> lhm = new LinkedHashMap<Employee, Integer>();

        // Adding entries in LinkedHashMap
        lhm.put(new Employee("John", 1020), 1);
        lhm.put(new Employee("Ravi", 1040), 2);
        lhm.put(new Employee("Jaya", 1030), 3);

        // Print LinkedHashMap
        for (Map.Entry<Employee, Integer> entry : lhm.entrySet()) {
            System.out.println(entry.getKey() + "=>" + entry.getValue());
        }

        // Create duplicate entry
        Employee duplicate = new Employee("John", 1020);
        System.out.println("Inserting duplicate record...");

        // Add duplicate entry
        lhm.put(duplicate, 4);

        System.out.println("After insertion:");
        for (Map.Entry<Employee, Integer> entry : lhm.entrySet()) {
            System.out.println(entry.getKey() + "=>" + entry.getValue());
        }
    }
}

In hashcode method
In hashcode method
In equals method
In hashcode method
In equals method
Employee Id: 1020 Name: John
Inserting duplicate record...
In hashcode method
In equals method
After insertion:
Employee Id: 1020 Name: John

Time Complexity: O(1)
How to Create a Git Hook to Push to Your Server and Github Repo | by Shinichi Okada | Towards Data Science

Git hooks are scripts that Git executes before or after commit, push, and receive. Setting up a Git hook makes your development and deployment easy.

In this article, you will learn how to create a post-receive Git hook that executes when you use the git push command.

I use $ for a local terminal prompt and # for a remote server prompt. Also, for simplicity, I use john for the username and Github username, and yourdomain.com for our domain name.

You already have a working Git repository on your local machine; we call it newsletter. You have a Linux server for your website (I use Ubuntu 20.04), and you can use ssh to connect from your local computer to your remote server.

I assume you have secured your server and set up firewalls on it, and that you can connect to your server using SSH.

Let's connect to your server:

$ ssh john@yourdomain.com
// or use your IP address
$ ssh john@167.99.123.45

Once you are in the remote server, create a new directory, newsletter:

# pwd
/home/john
# mkdir newsletter
# cd newsletter

Initialize an empty Git repository in the new directory:

# git init --bare

This will omit the working directory but create the directories and files we need.

Create a new directory under /var/www. We are going to redirect all files to this directory.

# sudo mkdir -p /var/www/newsletter/public_html

Using git remote set-url allows you to set multiple Git repo URLs.

Find out your current remote config using the git remote -v command:

$ git remote -v
origin  git@github.com:john/newsletter.git (fetch)
origin  git@github.com:john/newsletter.git (push)

Let's set remote URLs: one for our remote server and one for our Github repo.

$ git remote set-url --add --push origin john@yourdomain.com:/home/john/newsletter
$ git remote set-url --add --push origin git@github.com:john/newsletter.git

You need to run git remote set-url twice as above, since the first one will overwrite the current one.

Now you should have one fetch and two pushes:

$ git remote -v
origin  git@github.com:john/newsletter.git (fetch)
origin  john@yourdomain.com:/home/john/newsletter (push)
origin  git@github.com:john/newsletter.git (push)

On your remote server, create a file ~/newsletter/hooks/post-receive.

# cd ~/newsletter/hooks
# touch post-receive

Add the following:
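The original script was embedded in the post and is missing from this copy, so what follows is a best-guess reconstruction of mine, consistent with the deploy output shown in the push log below; the paths and branch name are the ones used earlier in this article. A hook can be any executable file, so this sketch uses Python:

#!/usr/bin/env python3
# Hypothetical reconstruction -- not the author's original script.
# Git feeds post-receive one "oldrev newrev ref" line per pushed ref
# on standard input.
import subprocess
import sys

WORK_TREE = "/var/www/newsletter/public_html"  # web root created earlier
GIT_DIR = "/home/john/newsletter"              # the bare repository

for line in sys.stdin:
    oldrev, newrev, ref = line.split()
    branch = ref.rsplit("/", 1)[-1]
    print(f"Push received! Deploying branch: {branch}...")
    # Check the pushed branch out into the web root.
    subprocess.run(
        ["git", f"--work-tree={WORK_TREE}", f"--git-dir={GIT_DIR}",
         "checkout", "-f", branch],
        check=True,
    )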
We need to make the file executable:

# chmod +x post-receive
# ls -Al ~/newsletter/hooks
total 56
-rwxrwxr-x 1 shin shin  478 Apr 24 03:07 applypatch-msg.sample
-rwxrwxr-x 1 shin shin  896 Apr 24 03:07 commit-msg.sample
-rwxrwxr-x 1 shin shin 3079 Apr 24 03:07 fsmonitor-watchman.sample
-rwxrwxr-x 1 shin shin  732 May  3 00:58 post-receive
-rwxrwxr-x 1 shin shin  189 Apr 24 03:07 post-update.sample
-rwxrwxr-x 1 shin shin  424 Apr 24 03:07 pre-applypatch.sample
...

The post-receive file mode should be -rwxrwxr-x.

On your local machine, run git push origin main:

$ git push origin main
Enumerating objects: 5, done.
Counting objects: 100% (5/5), done.
Delta compression using up to 4 threads
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 303 bytes | 303.00 KiB/s, done.
Total 3 (delta 2), reused 0 (delta 0), pack-reused 0
remote: Push received! Deploying branch: main...
remote: Already on 'main'
To okadia.net:/home/john/newsletter
   2b35421..aa80729  main -> main
Enumerating objects: 5, done.
Counting objects: 100% (5/5), done.
Delta compression using up to 4 threads
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 303 bytes | 303.00 KiB/s, done.
Total 3 (delta 2), reused 0 (delta 0), pack-reused 0
remote: Resolving deltas: 100% (2/2), completed with 2 local objects.
To github.com:john/newsletter.git
   2b35421..aa80729  main -> main

The Git hook post-receive is an excellent tool for developers who frequently work with a server, and Git has more tools for client- and server-side hooks at your disposal. How about starting to use it for your project?

If you like my article and would like to receive newsletters, please sign up.

Githooks

Deploying Code with a Git Hook on a DigitalOcean Droplet
Maximum sum Rectangle | Practice | GeeksforGeeks

Given a 2D matrix M of dimensions RxC, find the maximum sum submatrix in it.

Example 1:

Input:
R=4
C=5
M=[[1,2,-1,-4,-20],
[-8,-3,4,2,1],
[3,8,10,1,3],
[-4,-1,1,7,-6]]
Output:
29
Explanation:
The matrix is as follows and the
blue rectangle denotes the maximum sum
rectangle.

Example 2:

Input:
R=2
C=2
M=[[-1,-2],[-3,-4]]
Output:
-1
Explanation:
Taking only the first cell is the
optimal choice.

Your Task:
You don't need to read input or print anything. Your task is to complete the function maximumSumRectangle(), which takes the numbers R and C and the 2D matrix M as input parameters and returns the maximum submatrix sum.

Expected Time Complexity: O(R*R*C)
Expected Auxiliary Space: O(R*C)

Constraints:
1 <= R, C <= 500
-1000 <= M[i][j] <= 1000
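Before the community solutions below, here is a compact reference sketch of my own (not part of the original problem page): fix a pair of rows (top, bottom), collapse the columns between them into a 1-D array of column sums, and run Kadane's algorithm on that array, which gives the expected O(R*R*C) time:

def maximum_sum_rectangle(R, C, M):
    best = float('-inf')
    for top in range(R):
        col_sum = [0] * C
        for bottom in range(top, R):
            # extend every column sum down to the current bottom row
            for j in range(C):
                col_sum[j] += M[bottom][j]
            # Kadane's algorithm over the collapsed 1-D array
            running = 0
            for x in col_sum:
                running = max(running + x, x)
                best = max(best, running)
    return best

print(maximum_sum_rectangle(4, 5, [[1, 2, -1, -4, -20],
                                   [-8, -3, 4, 2, 1],
                                   [3, 8, 10, 1, 3],
                                   [-4, -1, 1, 7, -6]]))  # prints 29, as in Example 1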
0
anutigerin 9 hours

    int ans = 0;
    int ans1 = -1000;
    // track the largest single element in case every entry is negative
    for(int i = 0; i < R; i++){
        for(int j = 0; j < C; j++){
            ans1 = max(ans1, M[i][j]);
        }
    }
    // fix a pair of columns (i, j), collapse them into dp, run Kadane
    for(int i = 0; i < C; i++){
        vector<int> dp(R, 0);
        for(int j = i; j < C; j++){
            for(int k = 0; k < R; k++){
                dp[k] += M[k][j];
            }
            int res = 0;
            for(int k = 0; k < R; k++){
                res += dp[k];
                if(res < 0) res = 0;
                ans = max(ans, res);
            }
        }
    }
    if(ans == 0) return ans1;
    return ans;

0
wjyjobs 1 week ago

class Solution:
    def maximumSumRectangle(self, R, C, M):
        # build row prefix sums in place
        for r in M:
            for i in range(1, C):
                r[i] += r[i-1]

        ans = float('-inf')
        # fix a pair of columns (l, r) and run Kadane down the rows
        for l in range(C):
            for r in range(l, C):
                running = 0
                for i in range(R):
                    v = M[i][r]
                    if l > 0:
                        v -= M[i][l-1]
                    running += v
                    ans = max(ans, running)
                    if running < 0:
                        running = 0
        return ans
+1
abhishekkaswan 1 week ago

Can do it using O(C*C*R) like this also, really easy C++ solution:

int kadanel(vector<int> &res)
{
    // NB: the loop bodies were garbled in this copy and have been
    // restored with the standard Kadane pattern.
    int max1 = INT_MIN, max2 = 0;
    for(int i = 0; i < (int)res.size(); i++){
        max2 += res[i];
        max1 = max(max1, max2);
        if(max2 < 0) max2 = 0;
    }
    return max1;
}

int maximumSumRectangle(int r, int c, vector<vector<int>> M)
{
    int ans = INT_MIN;
    for(int i = 0; i < c; i++){
        vector<int> res(r);
        for(int j = i; j < c; j++){
            for(int k = 0; k < r; k++)
                res[k] += M[k][j];
            ans = max(ans, kadanel(res));
        }
    }
    return ans;
}

// prefix-sum variant from another comment (the username was lost in extraction)
vector<vector<int>> acc(R, vector<int>(C + 1));
int glo = INT_MIN;

// prefix-sum array
for (int i = 0; i < R; ++i)
    for (int j = 0; j < C; ++j)
        acc[i][j + 1] = acc[i][j] + M[i][j];

for (int i = 0; i < C; ++i)
{
    for (int j = i; j < C; ++j)
    {
        // kadane
        int loc = 0;

        for (int k = 0; k < R; ++k)
        {
            // use prefix-sum to avoid repeated accumulation.
            loc += acc[k][j + 1] - acc[k][i];
            glo = max(glo, loc);
            loc = max(loc, 0);
        }
    }
}

return glo;

+1
priyankapardesiramachander 1 week ago

My solution in C# → https://github.com/ramacpr/G4G_DailyCodingProblems/tree/master/April15_2022/MaxSumRectangle

0
akshayadivarekar777 1 week ago

//JAVA Solution
// Its just KADANE
class Solution {
    int maximumSumRectangle(int R, int C, int M[][]) {
        // code here
        // NB: the loop bodies were garbled in this copy and have been
        // restored with the standard column-pair Kadane pattern.
        int max = 0;
        int maxop = Integer.MIN_VALUE;
        int[] dp = new int[R];
        for (int i = 0; i < C; i++) {
            java.util.Arrays.fill(dp, 0);
            for (int j = i; j < C; j++) {
                max = 0;
                for (int k = 0; k < R; k++) {
                    dp[k] += M[k][j];
                    max = Math.max(dp[k], max + dp[k]);
                    maxop = Math.max(maxop, max);
                }
            }
        }
        return maxop;
    }
}
What is autoboxing and unboxing in Java?

Autoboxing refers to the automatic conversion of a primitive type variable to its corresponding wrapper class object.
Bayesian Hierarchical Modeling (or “more reasons why autoML cannot replace Data Scientists yet”) | by Alain Tanguy | Towards Data Science

In this article, we will use a Probabilistic Programming library developed for Python, pymc3.
A first introduction to the efficiency of Bayesian approaches in basic statistics and to pymc3 can be found here and there.

Since early 2018, Automated Machine Learning has become one of the trendiest topics in data science. Amazon's SageMaker or Google AutoML, to mention just a few, are now accessible to most Data Scientists, to such an extent that some tend to think exploring and understanding data is not necessary anymore in order to build machine learning models.

The promise of AutoML can be summed up this way:

the high degree of automation allows non-experts to make use of machine learning models and techniques without requiring to become an expert in a particular field first. [1]

The reasoning is straightforward; expertise is no longer required, just give data to the AutoML algorithm, and after testing a fixed number of predefined models, it will return the best one.

But here's the catch... AutoML algorithms ignore what best means to us, and will merely try to minimize an empirical error. Knowledge and expertise are still required to understand what this error really means, and to what extent it differs from the one we are actually hoping to minimize.

Given n data points, the empirical error is given by

$$I_n[f_n] = \frac{1}{n} \sum_{i=1}^{n} V(f_n(x_i), y_i)$$

for a particular function $f_n$ over all observable values of features $x_i$ and target $y_i$, where $V$ denotes a loss function. [2]

It is usually computed through a cross-validation or train/test process. Missing some links between covariates and target will generate a bias, often leading to under/over-fitting and eventually to a higher test/cross-validated error.

It is too often, and wrongly, the only one taken into account by data scientists (and always by AutoML engines).

The generalization error (also known as the out-of-sample error) is a measure of how accurately an algorithm is able to predict outcome values for previously unseen data. It is defined by:

$$I[f_n] = \int_{X \times Y} V(f_n(x), y)\, \rho(x, y)\, dx\, dy$$

for a particular function $f_n$ over all possible values of features $x$ and target $y$, where $V$ denotes a loss function and $\rho(x, y)$ is the unknown joint probability distribution for $x$ and $y$. [3]

Obviously, it is usually the one we would prefer to minimize when training our model. Unfortunately, it cannot be computed directly from data, unlike the empirical one, which is merely an estimate of the former.

With enough good quality data, we can interchange the two. Now imagine only part of the information is available at training time, or that there just isn't enough data at all, as is often the case in Machine Learning. Our model will then be trained on a different distribution ρ(x, y) from the one it will be running on later, and as a consequence, the empirical error will diverge from the generalization error.

Biased data leads to biased empirical error.

We can already see how much of a problem this is: the empirical error does not estimate the desired quantity anymore, but is the only one we can compute from data. We need to make sure it can still be relied on to some extent.
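As a minimal illustration of this divergence, consider the following synthetic toy (invented for this point, with a made-up quadratic relation, not data from this article): a line fit on a truncated slice of the inputs scores well on that slice and poorly on the full distribution.

import numpy as np

rng = np.random.default_rng(0)

# True relation: y = x**2 + noise, over x in [-3, 3].
x_full = rng.uniform(-3, 3, 5000)
y_full = x_full**2 + rng.normal(0, 0.1, x_full.size)

# Training data only covers x in [0, 3]: a truncated distribution.
mask = x_full > 0
x_train, y_train = x_full[mask], y_full[mask]

# A straight line fits the truncated slice reasonably well...
a, b = np.polyfit(x_train, y_train, 1)
pred = lambda x: a * x + b
mse = lambda x, y: np.mean((pred(x) - y) ** 2)

print("empirical error (truncated slice):", round(mse(x_train, y_train), 3))
# ...but the error over the full distribution is far larger.
print("error on the full distribution:  ", round(mse(x_full, y_full), 3))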
This is where human expertise comes into play and shows its full potential.

In the same way that parametric approaches are preferred over non-parametric ones when the relation between variables is clear, knowledge from the field of study compensates for the lack of information in the data by explicitly modeling some dependences.

The better our model can extrapolate from a partial or truncated distribution, the closer the empirical and generalization errors will be.

“Correlation does not imply causation.”

All data scientists have heard this sentence at least once, but it turns out only a few truly realize the implication when it comes to actual modeling. The topic of causality is actually just left out most of the time, sometimes wrongly, sometimes justifiably, but rarely knowingly.

A confounder is a variable that is causally related to both the covariate and the outcome of interest. As a causal concept, it cannot be described in terms of correlations.

The error yielded by confounders cannot be fully measured by traditional statistical methods, since it is not a statistical error per se.

One could argue that when the empirical error is low, or even the generalization error, then we shouldn't care whether our model exploits true causality or spurious correlations. It might be true in some cases, but unless we explicitly know why, this situation shouldn't be overlooked.

Simpson's paradox, described by Edward Simpson in 1951 and George Udny Yule in 1903, is a paradox in which a statistical trend appears when data are segmented into separate groups but reverses when the groups are combined.

It is usually observed in the presence of a confounding variable (typically represented by different colour groups) when causal relations are ignored.

The impact Simpson's paradox has in machine learning appears when it comes to decision making, where we are given the choice of which data we should consider to pick an action: the aggregated or the partitioned?

Unfortunately, the answer usually cannot be inferred from the data itself. Indeed, with exactly the same values we can have different answers, depending on the causal relationships among the variables. The real source of error lies outside of the data itself, and eventually it doesn't matter how low the test or cross-validation error is. We are not safe from making completely wrong decisions unless we are able to correctly model the environment first.
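Here is a small numerical sketch of the paradox, with synthetic numbers invented purely for illustration: each group shows a positive trend while the pooled fit shows a negative one.

import numpy as np

rng = np.random.default_rng(1)

slopes, xs, ys = [], [], []
# Two groups, each with a positive x -> y trend of slope ~ +1...
for offset in (0.0, 4.0):
    x = rng.uniform(0, 3, 200) + offset
    y = 1.0 * (x - offset) - 3.0 * offset + rng.normal(0, 0.2, 200)
    slopes.append(np.polyfit(x, y, 1)[0])
    xs.append(x)
    ys.append(y)

# ...but the group at larger x sits much lower, so the pooled slope flips sign.
pooled_slope = np.polyfit(np.concatenate(xs), np.concatenate(ys), 1)[0]
print("per-group slopes:", [round(s, 2) for s in slopes])   # both ~ +1
print("pooled slope:    ", round(pooled_slope, 2))          # negative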
We will now show, through a concrete example, how to handle this situation in the case of hierarchical dependences.

Data scientists often have to deal with geographic data, which has the disadvantage of not being easily exploitable by classic machine learning models. The location feature usually has a really high cardinality and is often unbalanced. However, it is intuitive that model parameters will vary from region to region, depending on local variables, while still being closely related, since they model the same phenomenon across different places.

We are going to see a way to handle this spatial dependence, and thus to minimize all the types of error listed above, through the following example.

We consider the problem of estimating the use of contraceptives in Bangladesh, and to this end we use data from the 1988 Bangladesh Fertility Survey. It consists of a subsample of 1934 women grouped in 60 districts, with variables defined as follows:

DISTRICT: identifying code for each district.
LC: Number of living children at time of survey.
AGE: Age of woman at time of survey.
URBAN: Type of region of residence.

and we'll consider 3 logistic Bayesian regressions with different characteristics to model different possible approaches.

Pooled model: Here we simply ignore the role of the DISTRICT variable by not using it. The result is a simple logistic regression with only 4 parameters, including the intercept. The location is not seen as a confounder and each region is assumed to behave similarly.

Unpooled model: For each district we fit a different logistic regression, leading to a total of 60 models with 4 parameters each. Even if we assume behaviour to vary from district to district, we don't take any advantage of the similarities they could share.

Hierarchical model: Bayesian hierarchical modelling is a statistical model written in multiple levels that estimates the parameters of the posterior distribution using the Bayesian method. The sub-models combine to form the hierarchical model, and Bayes' theorem is used to integrate them with the observed data and account for all the uncertainty that is present. [4]

We assume that while the βs are different for each district, as in the unpooled case, the coefficients now all share similarity. We can model this by assuming that each individual coefficient comes from a common group distribution:

$$\beta_{i,d} \sim \mathcal{N}(\mu_{\beta_i}, \sigma_{\beta_i}^2) \quad \text{for each covariate } i \text{ and district } d,$$

with

$$\mu_{\beta_i} \sim \mathcal{N}(0, \sigma_\mu^2), \qquad \sigma_{\beta_i} \sim \big|\mathcal{N}(0, \sigma_0^2)\big|.$$

Though analytically intractable, probabilistic programming allows us to compute the posterior of all our parameters using Markov chain Monte Carlo (MCMC) to sample from the posterior distributions. Again, pymc3 offers an extremely intuitive way to model our network and to compute posteriors!
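A minimal pymc3 sketch of such a partially pooled logistic regression follows; the variable names, prior scales and synthetic stand-in data are assumptions made for illustration, not the article's exact model specification.

import numpy as np
import pymc3 as pm

# Stand-in data with the survey's shapes (synthetic, for illustration only):
# in the real analysis X would hold AGE, LC and URBAN, y the contraceptive-use
# flag, and `district` the 0..59 district index of each woman.
n, n_districts, n_cov = 1934, 60, 3
rng = np.random.default_rng(0)
X = rng.normal(size=(n, n_cov))
y = rng.integers(0, 2, n)
district = rng.integers(0, n_districts, n)

with pm.Model():
    # Group-level hyperpriors shared by all districts (scales are guesses)
    mu_a = pm.Normal("mu_a", mu=0.0, sd=10.0)
    sigma_a = pm.HalfNormal("sigma_a", sd=5.0)
    mu_b = pm.Normal("mu_b", mu=0.0, sd=10.0, shape=n_cov)
    sigma_b = pm.HalfNormal("sigma_b", sd=5.0, shape=n_cov)

    # One intercept and one coefficient vector per district, all drawn from
    # the common group distribution -> partial pooling and shrinkage
    a = pm.Normal("a", mu=mu_a, sd=sigma_a, shape=n_districts)
    b = pm.Normal("b", mu=mu_b, sd=sigma_b, shape=(n_districts, n_cov))

    logit_p = a[district] + (b[district] * X).sum(axis=1)
    pm.Bernoulli("y_obs", p=pm.math.sigmoid(logit_p), observed=y)

    trace = pm.sample(2000, tune=1000, target_accept=0.9)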
We can also easily compute a graphical representation of our Bayesian network.

First of all, let's try to understand the differences between our models.

Inspired by https://docs.pymc.io/notebooks/GLM-hierarchical.html, we can visualize the evolution of our regression parameters over the different regressions.

We display the coefficients of each district's non-hierarchical posterior mean, hierarchical posterior mean, and pooled posterior mean. The small amount of data available at district level leads unpooled posteriors to be spread far out, and thus the shrinkage effect is really important; yet differences among means in the hierarchical model are still significant, as betas vary in order of magnitude between districts.

We use the Area Under the ROC Curve (AUC) as our error measure for comparing the models. It can be seen as the probability that our models will score a randomly chosen positive class higher than a randomly chosen negative class. It is particularly interesting to us since it has the advantage of not requiring to set a threshold to assign labels.

We considered 2 test sets in order to measure performance: a stratified-by-district one and a non-stratified one. The use of a non-stratified test set is more representative of a case where the generalization error will differ significantly from the empirical model error, since feature distributions among districts will vary significantly between train and test sets!

The measures have been averaged over multiple seeds for the test set sampling in order to be more representative of the real performance.

┌───────────────────┬────────┬──────────┬──────────────┐
│                   │ POOLED │ UNPOOLED │ HIERARCHICAL │
├───────────────────┼────────┼──────────┼──────────────┤
│ Train             │ 0.632  │ 0.818    │ 0.726        │
│ Stratified Test   │ 0.623  │ 0.618    │ 0.668        │
│ Unstratified Test │ 0.634  │ 0.603    │ 0.663        │
└───────────────────┴────────┴──────────┴──────────────┘

The unpooled model strongly overfits, as the huge gap between the train and test AUC demonstrates.

On the other hand, the pooled model is strongly biased and clearly underfits the data.

Finally, our hierarchical model performed significantly better than the others by taking advantage of the geographic similarities between parameters.

The shrinkage effect provided us with improved statistical power and can also be seen as a smart way to regularize.

Some districts have extremely few individuals from which to train, and thus the unstratified test error on those gets bigger with the unpooled model. The partial pooling takes into account the similarity between parameters, and provides low-density districts with information from others, while keeping their specificities.

As a result, the difference between models is even more significant on the unstratified test set, showing us that the hierarchical model's generalization capacity is greater, as its performance is almost unaffected by the stratification strategy.

=> Multi-level hierarchical Bayesian models outperform basic approaches when we have multiple sets of measurements we expect to have similarity.

It would be interesting to compare this Bayesian approach to other classic data preprocessing approaches (different encodings of the district variable) or algorithms (gradient boosting, random forest, etc.).

AutoML cannot replace Data Scientists yet, as it is not able to distinguish empirical error measures from actual business objectives, nor to correctly model dependences between covariates and target.

Experts are still required to understand the data and model the problems properly. To this end, they have access to a range of mathematical and software tools, including the pymc3 library and Bayesian hierarchical models, which allow them to easily model and compute distributions in the very common case of hierarchically structured data.

[1]: Wikipedia contributors. “Automated machine learning.” Wikipedia, The Free Encyclopedia. Wikipedia, The Free Encyclopedia, 18 Feb. 2020. Web. 1 Mar. 2020.
[2, 3]: Wikipedia contributors. (2020, February 22). Generalization error. In Wikipedia, The Free Encyclopedia. Retrieved 16:40, March 1, 2020, from https://en.wikipedia.org/w/index.php?title=Generalization_error&oldid=942140633
[4]: Wikipedia contributors. “Bayesian hierarchical modeling.” Wikipedia, The Free Encyclopedia. Wikipedia, The Free Encyclopedia, 12 Dec. 2019. Web. 1 Mar. 2020.
Causality: Models, Reasoning, and Inference, Cambridge University Press (2000, 2nd edition 2009). ISBN 0–521–77362–8.
https://docs.pymc.io/notebooks/GLM-hierarchical.html
Huq, N. M., and Cleland, J. 1990. Bangladesh Fertility Survey 1989 (Main Report). Dhaka: National Institute of Population Research and Training.
Legendre P. Spatial autocorrelation: Trouble or new paradigm? Ecology. 1993;74:1659–1673.
Apache NiFi - Quick Guide

Apache NiFi is a powerful, easy to use and reliable system to process and distribute data between disparate systems. It is based on the Niagara Files technology developed by the NSA and donated to the Apache Software Foundation 8 years later. It is distributed under the Apache License Version 2.0, January 2004. The latest version of Apache NiFi is 1.7.1.

Apache NiFi is a real-time data ingestion platform, which can manage data transfer between different source and destination systems. It supports a wide variety of data formats like logs, geolocation data, social feeds, etc. It also supports many protocols like SFTP, HDFS, KAFKA, etc. This support for a wide variety of data sources and protocols makes the platform popular in many IT organizations.

The general features of Apache NiFi are as follows −
Apache NiFi provides a web-based user interface, which provides a seamless experience between design, control, feedback, and monitoring.
It is highly configurable. This helps users with guaranteed delivery, low latency, high throughput, dynamic prioritization, back pressure, and the ability to modify flows at runtime.
It also provides a data provenance module to track and monitor data from the start to the end of the flow.
Developers can create their own custom processors and reporting tasks according to their needs.
NiFi also provides support for secure protocols like SSL, HTTPS, SSH and other encryptions.
It also supports user and role management and can also be configured with LDAP for authorization.

The key concepts of Apache NiFi are as follows −
Process Group − It is a group of NiFi flows, which helps a user to manage and keep flows in a hierarchical manner.
Flow − It is created by connecting different processors to transfer and, if required, modify data from one data source or sources to another destination data source.
Processor − A processor is a Java module responsible for either fetching data from a sourcing system or storing it in a destination system. Other processors are also used to add attributes to or change content in a flowfile.
Flowfile − It is the basic unit of work in NiFi, which represents the single object of the data picked from a source system. NiFi processors make changes to the flowfile while it moves from the source processor to the destination. Different events like CREATE, CLONE, RECEIVE, etc. are performed on a flowfile by different processors in a flow.
Event − Events represent the change in a flowfile while traversing through a NiFi flow. These events are tracked in data provenance.
Data provenance − It is a repository. It also has a UI, which enables users to check the information about a flowfile and helps in troubleshooting any issues that arise during the processing of a flowfile.
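As a purely conceptual toy (this is not NiFi's actual API, just an illustration of the vocabulary above), a flowfile can be pictured as a content payload plus an attribute map, with each processor appending an event to its provenance trail:

import os
from dataclasses import dataclass, field

@dataclass
class FlowFile:
    content: bytes                                  # the data payload
    attributes: dict = field(default_factory=dict)  # metadata about the payload
    events: list = field(default_factory=list)      # toy provenance trail

def fetch_from_source(path):
    """Toy stand-in for a source processor: creates a flowfile."""
    ff = FlowFile(open(path, "rb").read(), {"filename": os.path.basename(path)})
    ff.events.append("CREATE")
    return ff

def store_to_destination(ff, out_dir):
    """Toy stand-in for a destination processor: writes the content out."""
    with open(os.path.join(out_dir, ff.attributes["filename"]), "wb") as out:
        out.write(ff.content)
    ff.events.append("SEND")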
Apache NiFi enables data fetching from remote machines by using SFTP and guarantees data lineage.
Apache NiFi supports clustering, so it can work on multiple nodes with the same flow processing different data, which increases the performance of data processing.
It also provides security policies at the user level, process group level and for other modules too.
Its UI can also run over HTTPS, which makes the interaction of users with NiFi secure.
NiFi supports around 188 processors, and a user can also create custom plugins to support a wide variety of data systems.

When a node gets disconnected from the NiFi cluster while a user is making changes in it, the flow.xml becomes invalid. The node cannot connect back to the cluster unless the admin manually copies flow.xml from a connected node.
Apache NiFi has a state persistence issue in the case of a primary node switch, which sometimes prevents processors from fetching data from sourcing systems.

Apache NiFi consists of a web server, a flow controller and a processor, which run on a Java Virtual Machine. It also has three repositories: the FlowFile Repository, the Content Repository, and the Provenance Repository.

The FlowFile Repository stores the current state and attributes of every flowfile that goes through the data flows of Apache NiFi. The default location of this repository is in the root directory of Apache NiFi. The location of this repository can be changed by changing the property named "nifi.flowfile.repository.directory".

The Content Repository contains all the content present in all the flowfiles of NiFi. Its default directory is also in the root directory of NiFi and it can be changed using the "org.apache.nifi.controller.repository.FileSystemRepository" property. This directory uses a large amount of disk space, so it is advisable to have enough space on the installation disk.

The Provenance Repository tracks and stores all the events of all the flowfiles that flow in NiFi. There are two provenance repositories − the volatile provenance repository (in this repository all the provenance data gets lost after a restart) and the persistent provenance repository. Its default directory is also in the root directory of NiFi and it can be changed using the "org.apache.nifi.provenance.PersistentProvenanceRepository" and "org.apache.nifi.provenance.VolatileProvenanceRepository" properties for the respective repositories.
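All three repository locations live in NiFi's conf/nifi.properties file. A small sketch for inspecting them follows; the installation path is an assumption, and of the three key prefixes only nifi.flowfile.repository is quoted by this guide, so verify the other two against your own nifi.properties.

# Print the repository-related entries from conf/nifi.properties.
# NIFI_HOME is an assumed path; adjust it for your installation.
NIFI_HOME = "/opt/nifi-1.7.1"

with open(f"{NIFI_HOME}/conf/nifi.properties") as f:
    for line in f:
        line = line.strip()
        if line.startswith(("nifi.flowfile.repository",
                            "nifi.content.repository",
                            "nifi.provenance.repository")):
            key, _, value = line.partition("=")
            print(f"{key:55} {value}")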
In this chapter, we will learn about the environment setup of Apache NiFi. The steps for installation of Apache NiFi are as follows −
Step 1 − Install the current version of Java on your computer and set JAVA_HOME on your machine. You can check the version as shown below −
In Windows Operating System (OS) (using command prompt) −

> java -version

In UNIX OS (using terminal) −

$ echo $JAVA_HOME

Step 2 − Download Apache NiFi from https://nifi.apache.org/download.html
For Windows OS, download the ZIP file.
For UNIX OS, download the TAR file.
For Docker images, go to the following link https://hub.docker.com/r/apache/nifi/.
Step 3 − The installation process for Apache NiFi is very easy, but it differs with the OS −
Windows OS − Unzip the ZIP package and Apache NiFi is installed.
UNIX OS − Extract the TAR file in any location and Apache NiFi is installed.

$ tar -xvf nifi-1.6.0-bin.tar.gz

Step 4 − Open the command prompt, go to the bin directory of NiFi, for example C:\nifi-1.7.1\bin, and execute the run-nifi.bat file.

C:\nifi-1.7.1\bin>run-nifi.bat
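On UNIX OS, the equivalent launcher is the nifi.sh script shipped in the same bin directory. A short sketch (the directory name depends on the version you extracted):

$ cd nifi-1.6.0/bin
$ ./nifi.sh start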
Step 5 − It will take a few minutes for the NiFi UI to come up. A user can check nifi-app.log; once the NiFi UI is up, the user can enter http://localhost:8080/nifi/ to access the UI.
Apache NiFi is a web-based platform that can be accessed by a user using the web UI. The NiFi UI is very interactive and provides a wide variety of information about NiFi. As shown in the image below, a user can access information about the following attributes −
Active Threads
Total queued data
Transmitting Remote Process Groups
Not Transmitting Remote Process Groups
Running Components
Stopped Components
Invalid Components
Disabled Components
Up to date Versioned Process Groups
Locally modified Versioned Process Groups
Stale Versioned Process Groups
Locally modified and Stale Versioned Process Groups
Sync failure Versioned Process Groups
The Apache NiFi UI has the following components −
A user can drag the processor icon onto the canvas and select the desired processor for the data flow in NiFi.
The below icon is dragged to the canvas to add an input port into any data flow.
An input port is used to get data from a processor, which is not present in that process group.
After dragging this icon, NiFi asks to enter the name of the input port and then it is added to the NiFi canvas.
The below icon is dragged to the canvas to add an output port into any data flow.
An output port is used to transfer data to a processor, which is not present in that process group.
After dragging this icon, NiFi asks to enter the name of the output port and then it is added to the NiFi canvas.
A user uses the below icon to add a process group to the NiFi canvas.
After dragging this icon, NiFi asks to enter the name of the process group and then it is added to the NiFi canvas.
This is used to add a remote process group to the NiFi canvas.
A funnel is used to transfer the output of a processor to multiple processors. A user can use the below icon to add a funnel to a NiFi data flow.
This icon is used to add a data flow template to the NiFi canvas. This helps to reuse the data flow in the same or different NiFi instances.
After dragging, a user can select from the templates already added to NiFi.
Labels are used to add text on the NiFi canvas about any component present in NiFi. They offer a range of colors a user can pick to add aesthetic sense.
Apache NiFi processors are the basic blocks for creating a data flow. Every processor has different functionality, which contributes to the creation of the output flowfile. The dataflow shown in the image below fetches a file from one directory using the GetFile processor and stores it in another directory using the PutFile processor.
The GetFile processor is used to fetch files of a specific format from a specific directory. It also provides other options to the user for more control over fetching. We will discuss these in the Properties section below.
Following are the different settings of the GetFile processor −
In the Name setting, a user can define any name for the processor, either according to the project or one that makes the purpose more meaningful.
A user can enable or disable the processor using this setting.
This setting lets a user add the penalty time duration applied in the event of flowfile failure.
This setting is used to specify the yield time for the processor. In this duration, the process is not scheduled again.
This setting is used to specify the log level of that processor.
This has a checklist of all the available relationships of that particular processor. By checking the boxes, a user can program the processor to terminate the flowfile on that event and not send it further in the flow.
These are the scheduling options offered by the GetFile processor −
You can schedule the process on a time basis by selecting the timer driven strategy, or via a specified CRON string by selecting the CRON driven option.
This option is used to define the concurrent task schedule for this processor.
A user can define whether to run the processor on all nodes or only on the primary node by using this option.
It is used to define the time for the timer driven strategy, or the CRON expression for the CRON driven strategy.
GetFile offers multiple properties as shown in the image below, ranging from compulsory properties like Input Directory and File Filter to optional properties like Path Filter and Maximum File Size. A user can manage the file fetching process using these properties.
This section is used to specify any information about the processor.
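For instance, a minimal GetFile configuration for such a flow might use the following property values (the input directory is illustrative; the other values shown are the processor's stock defaults):

Input Directory          C:\inputdir
File Filter              [^\.].*
Keep Source File         false
Recurse Subdirectories   true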
The PutFile processor is used to store the file from the data flow to a specific location.
The PutFile processor has the following settings −
In the Name setting, a user can define any name for the processor, either according to the project or one that makes the purpose more meaningful.
A user can enable or disable the processor using this setting.
This setting lets a user add the penalty time duration applied in the event of flowfile failure.
This setting is used to specify the yield time for the processor. In this duration, the process does not get scheduled again.
This setting is used to specify the log level of that processor.
This setting has a checklist of all the available relationships of that particular processor. By checking the boxes, a user can program the processor to terminate the flowfile on that event and not send it further in the flow.
These are the scheduling options offered by the PutFile processor −
You can schedule the process on a time basis either by selecting the timer driven strategy or via a specified CRON string by selecting the CRON driven option. There is also an experimental strategy, Event Driven, which triggers the processor on a specific event.
This option is used to define the concurrent task schedule for this processor.
A user can define whether to run the processor on all nodes or only on the primary node by using this option.
It is used to define the time for the timer driven strategy, or the CRON expression for the CRON driven strategy.
The PutFile processor provides properties like Directory to specify the output directory for the purpose of file transfer, and others to manage the transfer, as shown in the image below.
This section is used to specify any information about the processor.
In this chapter, we will discuss processor categorization in Apache NiFi.
The processors under the Data Ingestion category are used to ingest data into the NiFi data flow. These are mainly the starting point of any data flow in Apache NiFi. Some of the processors that belong to this category are GetFile, GetHTTP, GetFTP, GetKafka, etc.
Routing and Mediation processors are used to route flowfiles to different processors or data flows according to the information in the attributes or content of those flowfiles. These processors are also responsible for controlling the NiFi data flows. Some of the processors that belong to this category are RouteOnAttribute, RouteOnContent, ControlRate, RouteText, etc.
The processors of the Database Access category are capable of selecting or inserting data, or executing and preparing other SQL statements against a database. These processors mainly use the data connection pool controller setting of Apache NiFi. Some of the processors that belong to this category are ExecuteSQL, PutSQL, PutDatabaseRecord, ListDatabaseTables, etc.
Attribute Extraction processors are responsible for extracting, analyzing and changing flowfile attributes during processing in the NiFi data flow. Some of the processors that belong to this category are UpdateAttribute, EvaluateJsonPath, ExtractText, AttributesToJSON, etc.
System Interaction processors are used to run processes or commands in any operating system. These processors can also run scripts in many languages to interact with a variety of systems. Some of the processors that belong to this category are ExecuteScript, ExecuteProcess, ExecuteGroovyScript, ExecuteStreamCommand, etc.
Processors that belong to Data Transformation are capable of altering the content of flowfiles. These can be used to fully replace the data of a flowfile, normally when a user has to send flowfile content as an HTTP body to the InvokeHTTP processor. Some of the processors that belong to this category are ReplaceText, JoltTransformJSON, etc.
Sending Data processors are generally the end processors in a data flow. These processors are responsible for storing or sending data to the destination server. After successfully storing or sending the data, these processors DROP the flowfile with a success relationship. Some of the processors that belong to this category are PutEmail, PutKafka, PutSFTP, PutFile, PutFTP, etc.
These processors are used to split and merge the content present in a flowfile.
Some of the processors that belong to this category are SplitText, SplitJson, SplitXml, MergeContent, SplitContent, etc.
These processors deal with HTTP and HTTPS calls. Some of the processors that belong to this category are InvokeHTTP, PostHTTP, ListenHTTP, etc.
AWS processors are responsible for interacting with the Amazon Web Services system. Some of the processors that belong to this category are GetSQS, PutSNS, PutS3Object, FetchS3Object, etc.
In an Apache NiFi data flow, flowfiles move from one processor to another through connections, which are validated using relationships between processors. Whenever a connection is created, a developer selects one or more relationships between those processors.
As you can see in the above image, the check boxes in the black rectangle are relationships. If a developer selects these check boxes, the flowfile will terminate in that particular processor when the relationship is success or failure or both.
When a processor successfully processes a flowfile, such as storing or fetching data from a data source without any connection, authentication or other error, the flowfile goes to the success relationship.
When a processor is not able to process a flowfile without errors, such as an authentication error or a connection problem, the flowfile goes to the failure relationship.
A developer can also transfer flowfiles to other processors using connections. The developer can select connections and also load balance them, but load balancing was only just released in version 1.8, so it will not be covered in this tutorial.
As you can see in the above image, the connection marked in red has the failure relationship, which means all flowfiles with errors will go to the processor on the left, and correspondingly all the flowfiles without errors will be transferred through the connection marked in green.
Let us now proceed with the other relationships.
comms.failure − This relationship is met when a flowfile could not be fetched from the remote server due to a communication failure.
not.found − Any flowfile for which we receive a 'Not Found' message from the remote server will move to the not.found relationship.
permission.denied − When NiFi is unable to fetch a flowfile from the remote server due to insufficient permission, it will move through this relationship.
A flowfile is the basic processing entity in Apache NiFi. It contains data content and attributes, which are used by NiFi processors to process data. The file content normally contains the data fetched from source systems. The most common attributes of an Apache NiFi flowfile are −
uuid − This stands for Universally Unique Identifier, which is a unique identity of a flowfile generated by NiFi.
filename − This attribute contains the filename of that flowfile and should not contain any directory structure.
fileSize − It contains the size of an Apache NiFi flowfile.
mime.type − It specifies the MIME type of this flowfile.
path − This attribute contains the relative path of the file to which the flowfile belongs and does not contain the file name.
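Put together, the attribute map of a flowfile picked up from a local directory might look like the following sketch (all values are purely illustrative):

uuid        28d5d8b4-0c5e-4d2a-9a6f-3d1c5e7b9f21
filename    sample.txt
path        ./
fileSize    1024
mime.type   text/plain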
The Apache NiFi data flow connection has a queuing system to handle large amounts of data inflow. These queues can hold very large numbers of flowfiles to let the processor process them serially.
The queue in the above image has 1 flowfile transferred through the success relationship. A user can check the flowfile by selecting the List queue option in the drop-down list. In case of any overload or error, a user can also clear the queue by selecting the Empty queue option, and the user can then restart the flow to get those files into the data flow again.
The list of flowfiles in a queue consists of position, UUID, Filename, File Size, Queue Duration, and Lineage Duration. A user can see all the attributes and content of a flowfile by clicking the info icon present in the first column of the flowfile list.
In Apache NiFi, a user can maintain different data flows in different process groups. These groups can be based on the different projects or organizations that the Apache NiFi instance supports.
The fourth symbol in the menu at the top of the NiFi UI, as shown in the above picture, is used to add a process group to the NiFi canvas. The process group named "Tutorialspoint.com_ProcessGroup" contains a data flow with four processors currently in the stopped state, as you can see in the above picture. Process groups can be created in a hierarchical manner to manage the data flows in a better structure, which is easier to understand.
In the footer of the NiFi UI, you can see the process groups and can go back to the top of the process group a user is currently in.
To see the full list of process groups present in NiFi, a user can go to the summary using the menu present at the top left side of the NiFi UI. In the summary, there is a process groups tab where all the process groups are listed with parameters like Version State, Transferred/Size, In/Size, Read/Write, Out/Size, etc., as shown in the below picture.
Apache NiFi offers labels to enable a developer to write information about the components present on the NiFi canvas. The leftmost icon in the top menu of the NiFi UI is used to add a label to the NiFi canvas.
A developer can change the color of the label and the size of its text with a right-click on the label and by choosing the appropriate option from the menu.
Apache NiFi is a highly configurable platform. The nifi.properties file in the conf directory contains most of the configuration.
The commonly used properties of Apache NiFi are as follows −
The core properties section contains the properties, which are compulsory to run a NiFi instance.
The state management properties are used to store the state of the components, which helps the components resume processing from the point where they left off after a restart and at the next scheduled run.
The FlowFile repository is controlled by the nifi.flowfile.repository.* entries of nifi.properties, as discussed in the repositories section earlier.
Apache NiFi offers support for multiple tools like Ambari and ZooKeeper for administration purposes. NiFi also provides configuration in the nifi.properties file to set up HTTPS and other things for administrators.
NiFi itself does not handle the voting process in a cluster, so when a cluster is created, ZooKeeper is configured to manage the election of the primary node and the cluster coordinator. The nifi.properties file contains some properties to set up ZooKeeper.
To use NiFi over HTTPS, administrators have to generate a keystore and truststore and set some properties in the nifi.properties file, as in the sketch below. The TLS toolkit can be used to generate all the necessary keys to enable HTTPS in Apache NiFi.
There are some other properties, which are used by administrators to manage NiFi and for its service continuity.
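For example, the HTTPS-related entries in nifi.properties look like the following (the host, port and passwords are illustrative; the keystore and truststore here are assumed to have been generated with the TLS toolkit):

nifi.web.https.host=nifi.example.com
nifi.web.https.port=9443
nifi.security.keystore=./conf/keystore.jks
nifi.security.keystoreType=JKS
nifi.security.keystorePasswd=changeit
nifi.security.truststore=./conf/truststore.jks
nifi.security.truststoreType=JKS
nifi.security.truststorePasswd=changeit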
Apache NiFi offers a large number of components to help developers create data flows for any type of protocol or data source. To create a flow, a developer drags the components from the menu bar to the canvas and connects them by clicking and dragging the mouse from one component to another.
Generally, a NiFi flow has a listener component at the start, like GetFile, which gets the data from the source system. At the other end there is a transmitter component like PutFile, and in between there are components that process the data.
For example, let us create a flow which takes an empty file from one directory, adds some text to that file, and puts it in another directory.
To begin with, drag the processor icon to the NiFi canvas and select the GetFile processor from the list.
Create an input directory like c:\inputdir.
Right-click on the processor and select configure. In the properties tab, add Input Directory (c:\inputdir), click apply and go back to the canvas.
Drag the processor icon to the canvas and select the ReplaceText processor from the list.
Right-click on the processor and select configure. In the properties tab, add some text like "Hello tutorialspoint.com" in the textbox of Replacement Value and click apply.
Go to the settings tab, check the failure checkbox on the right hand side, and then go back to the canvas.
Connect the GetFile processor to ReplaceText on the success relationship.
Drag the processor icon to the canvas and select the PutFile processor from the list.
Create an output directory like c:\outputdir.
Right-click on the processor and select configure. In the properties tab, add Directory (c:\outputdir), click apply and go back to the canvas.
Go to the settings tab, check the failure and success checkboxes on the right hand side, and then go back to the canvas.
Connect the ReplaceText processor to PutFile on the success relationship.
Now start the flow and add an empty file to the input directory; you will see that it moves to the output directory and the text is added to the file.
By following the above steps, developers can choose any processor and other NiFi components to create a suitable flow for their organisation or client.
Apache NiFi offers the concept of templates, which makes it easier to reuse and distribute NiFi flows.
The flows can then be used by other developers or in other NiFi clusters. It also helps NiFi developers share their work in repositories like GitHub.
Let us create a template for the flow, which we created in the chapter "Apache NiFi - Creating Flows".
Select all the components of the flow using the shift key and then click on the create template icon at the left hand side of the NiFi canvas. You can also see a toolbox as shown in the above image. Click on the create template icon marked in blue as in the above picture. Enter a name for the template. A developer can also add a description, which is optional.
Then go to the NiFi Templates option in the menu present at the top right hand corner of the NiFi UI, as shown in the picture below.
Now click the download icon (present at the right hand side in the list) of the template you want to download. An XML file with the template name will be downloaded.
To use a template in NiFi, a developer will have to upload its XML file to NiFi using the UI. There is an Upload Template icon (marked in blue in the below image) beside the Create Template icon; click on it and browse to the XML file.
In the top toolbar of the NiFi UI, the template icon is before the label icon. The icon is marked in blue as shown in the picture below.
Drag the template icon, choose the template from the drop-down list and click Add. The template will be added to the NiFi canvas.
NiFi offers a large number of APIs, which help developers make changes to, and get information from, NiFi from any other tool or custom-developed application. In this tutorial, we will use the Postman app in Google Chrome to explain some examples.
To add Postman to your Google Chrome, go to the Chrome Web Store, search for Postman and click the Add to Chrome button. You will then see a new app added to your Google Chrome.
The current version of the NiFi REST API is 1.8.0 and its documentation is present at the below mentioned URL.
https://nifi.apache.org/docs/nifi-docs/rest-api/index.html
The most used NiFi REST API modules (such as flow, processors, process-groups, connections and provenance) are all rooted at the following base URL −
http://<host>:<port>/nifi-api/
In case HTTPS is enabled −
https://<host>:<port>/nifi-api/
Let us now consider an example and run it in Postman to get the details about the running NiFi instance.

GET http://localhost:8080/nifi-api/flow/about

{
  "about": {
    "title": "NiFi",
    "version": "1.7.1",
    "uri": "http://localhost:8080/nifi-api/",
    "contentViewerUrl": "../nifi-content-viewer/",
    "timezone": "SGT",
    "buildTag": "nifi-1.7.1-RC1",
    "buildTimestamp": "07/12/2018 12:54:43 SGT"
  }
}
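The same request can be issued outside Postman as well; for example, with curl against an unsecured local instance:

$ curl -s http://localhost:8080/nifi-api/flow/about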
Apache NiFi logs and stores every piece of information about the events that occur on the ingested data in the flow. The data provenance repository stores this information and provides a UI to search this event information. Data provenance can be accessed at the full NiFi level and also at the processor level.
The NiFi Data Provenance event list includes fields like the event date/time, the event type, the flowfile UUID, the size, and the component name and type.
To get more information about an event, a user can click on the information icon present in the first column of the NiFi Data Provenance UI.
There are some properties in the nifi.properties file, which are used to manage the NiFi Data Provenance repository.
In Apache NiFi, there are multiple ways to monitor the different statistics of the system like errors, memory usage, CPU usage, data flow statistics, etc. We will discuss the most popular ones in this tutorial.
In this section, we will learn more about in-built monitoring in Apache NiFi.
The bulletin board shows the latest ERROR and WARNING bulletins generated by NiFi processors in real time. To access the bulletin board, a user will have to go to the right hand drop-down menu and select the Bulletin Board option. It refreshes automatically, and a user can also disable it. A user can also navigate to the actual processor by double-clicking the error. A user can also filter the bulletins with the following −
by message
by name
by id
by group id
To monitor the events occurring on any specific processor or throughout NiFi, a user can access the Data Provenance page from the same menu as the bulletin board. A user can also filter the events in the data provenance repository with the following fields −
by component name
by component type
by type
The Apache NiFi summary can also be accessed from the same menu as the bulletin board. This UI contains information about all the components of that particular NiFi instance or cluster. They can be filtered by name, by type or by URI. There are different tabs for different component types. Following are the components, which can be monitored in the NiFi summary UI −
Processors
Input ports
Output ports
Remote process groups
Connections
Process groups
In this UI, there is a link at the bottom right hand side named system diagnostics to check the JVM statistics.
Apache NiFi provides multiple reporting tasks to support external monitoring systems like Ambari, Grafana, etc. A developer can create a custom reporting task or can configure the inbuilt ones, such as the MonitorMemory task discussed later in this tutorial, to send the metrics of NiFi to external monitoring systems.
There is an API named system diagnostics, which can be used to monitor the NiFi stats in any custom-developed application.
Let us check the API in Postman.

GET http://localhost:8080/nifi-api/system-diagnostics

{
  "systemDiagnostics": {
    "aggregateSnapshot": {
      "totalNonHeap": "183.89 MB",
      "totalNonHeapBytes": 192819200,
      "usedNonHeap": "173.47 MB",
      "usedNonHeapBytes": 181894560,
      "freeNonHeap": "10.42 MB",
      "freeNonHeapBytes": 10924640,
      "maxNonHeap": "-1 bytes",
      "maxNonHeapBytes": -1,
      "totalHeap": "512 MB",
      "totalHeapBytes": 536870912,
      "usedHeap": "273.37 MB",
      "usedHeapBytes": 286652264,
      "freeHeap": "238.63 MB",
      "freeHeapBytes": 250218648,
      "maxHeap": "512 MB",
      "maxHeapBytes": 536870912,
      "heapUtilization": "53.0%",
      "availableProcessors": 4,
      "processorLoadAverage": -1,
      "totalThreads": 71,
      "daemonThreads": 31,
      "uptime": "17:30:35.277",
      "flowFileRepositoryStorageUsage": {
        "freeSpace": "286.93 GB",
        "totalSpace": "464.78 GB",
        "usedSpace": "177.85 GB",
        "freeSpaceBytes": 308090789888,
        "totalSpaceBytes": 499057160192,
        "usedSpaceBytes": 190966370304,
        "utilization": "38.0%"
      },
      "contentRepositoryStorageUsage": [
        {
          "identifier": "default",
          "freeSpace": "286.93 GB",
          "totalSpace": "464.78 GB",
          "usedSpace": "177.85 GB",
          "freeSpaceBytes": 308090789888,
          "totalSpaceBytes": 499057160192,
          "usedSpaceBytes": 190966370304,
          "utilization": "38.0%"
        }
      ],
      "provenanceRepositoryStorageUsage": [
        {
          "identifier": "default",
          "freeSpace": "286.93 GB",
          "totalSpace": "464.78 GB",
          "usedSpace": "177.85 GB",
          "freeSpaceBytes": 308090789888,
          "totalSpaceBytes": 499057160192,
          "usedSpaceBytes": 190966370304,
          "utilization": "38.0%"
        }
      ],
      "garbageCollection": [
        {
          "name": "G1 Young Generation",
          "collectionCount": 344,
          "collectionTime": "00:00:06.239",
          "collectionMillis": 6239
        },
        {
          "name": "G1 Old Generation",
          "collectionCount": 0,
          "collectionTime": "00:00:00.000",
          "collectionMillis": 0
        }
      ],
      "statsLastRefreshed": "09:30:20 SGT",
      "versionInfo": {
        "niFiVersion": "1.7.1",
        "javaVendor": "Oracle Corporation",
        "javaVersion": "1.8.0_151",
        "osName": "Windows 7",
        "osVersion": "6.1",
        "osArchitecture": "amd64",
        "buildTag": "nifi-1.7.1-RC1",
        "buildTimestamp": "07/12/2018 12:54:43 SGT"
      }
    }
  }
}
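From the command line, a specific figure can be pulled out of this response; a small sketch assuming the jq utility is installed:

$ curl -s http://localhost:8080/nifi-api/system-diagnostics | jq -r '.systemDiagnostics.aggregateSnapshot.heapUtilization'
53.0%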
Before starting the upgrade of Apache NiFi, read the release notes to know about the changes and additions. A user needs to evaluate the impact of these additions and changes on his/her current NiFi installation. Below is the link to the release notes for the new releases of Apache NiFi.
https://cwiki.apache.org/confluence/display/NIFI/Release+Notes
In a cluster setup, a user needs to upgrade the NiFi installation of every node in the cluster. Follow the steps given below to upgrade Apache NiFi.
Backup all the custom NARs present in the lib or any other folder of your current NiFi.
Download the new version of Apache NiFi. Below is the link to download the source and binaries of the latest NiFi version.
https://nifi.apache.org/download.html
Create a new directory in the same installation directory as the current NiFi and extract the new version of Apache NiFi.
Stop NiFi gracefully. First stop all the processors and let all the flowfiles present in the flow get processed. Once no more flowfiles remain, stop NiFi.
Copy the configuration of authorizers.xml from the current NiFi installation to the new version.
Update the values in bootstrap-notification-services.xml and bootstrap.conf of the new NiFi version from the current one.
Add the custom logging from logback.xml to the new NiFi installation.
Configure the login identity provider in login-identity-providers.xml from the current version.
Update all the properties in nifi.properties of the new NiFi installation from the current version.
Please make sure that the group and user of the new version are the same as in the current version, to avoid any permission denied errors.
Copy the configuration from state-management.xml of the current version to the new version.
Copy the contents of the following locations from the current NiFi installation to the same locations in the new version (a short sketch follows this list) −
./conf/flow.xml.gz, and also flow.xml.gz from the archive directory.
For the provenance and content repositories, change the values in the nifi.properties file to point to the current repositories.
Copy the state from ./state/local, or change nifi.properties if any other external directory is specified.
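A minimal sketch of those copy steps, assuming illustrative installation paths /opt/nifi-current and /opt/nifi-new:

$ cp /opt/nifi-current/conf/flow.xml.gz /opt/nifi-new/conf/
$ cp -r /opt/nifi-current/state/local /opt/nifi-new/state/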
Recheck all the changes performed and check whether they conflict with any new changes added in the new NiFi version. If there is any impact, check for the solutions.
Start all the NiFi nodes and verify that all the flows are working correctly, that the repositories are storing data, and that the UI is retrieving it without any errors.
Monitor the bulletins for some time to check for any new errors.
If the new version is working correctly, the current version can be archived and deleted from the directories.
The Apache NiFi Remote Process Group, or RPG, enables a flow to direct the flowfiles in a flow to different NiFi instances using the Site-to-Site protocol. As of version 1.7.1, NiFi does not offer balanced relationships, so RPG is used for load balancing in a NiFi data flow.
A developer can add an RPG from the top toolbar of the NiFi UI by dragging the icon, as shown in the above picture, to the canvas. To configure an RPG, a developer has to fill in fields such as the URLs of the target NiFi instances and the transport protocol.
A developer needs to enable it before using it, just like we start processors before using them.
Apache NiFi offers shared services, which can be shared by processors and reporting tasks; these are called controller settings. These are like a database connection pool, which can be used by processors accessing the same database.
To access the controller settings, use the drop-down menu at the top right corner of the NiFi UI, as shown in the below image.
There are many controller settings offered by Apache NiFi; we will discuss a commonly used one and how to set it up in NiFi.
Click the plus sign in the NiFi Settings page after clicking the Controller settings option. Then select DBCPConnectionPool from the list of controller settings. DBCPConnectionPool will be added to the main NiFi settings page, as shown in the below image.
It contains the following information about the controller setting −
Name
Type
Bundle
State
Scope
Configure and delete icons
Click on the configure icon and fill in the required fields, such as the database connection URL, the driver class name, the driver location and the credentials (see the sketch after this section).
To stop or configure a controller setting, all the attached NiFi components should first be stopped. NiFi also adds a scope to controller settings to manage their configuration, so that only the components which share the same setting are impacted and use the same controller setting.
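As a sketch, a DBCPConnectionPool for a local MySQL database might be configured with values like these (all values are illustrative; the driver JAR path must point to a real file on the NiFi host):

Database Connection URL       jdbc:mysql://localhost:3306/nifidb
Database Driver Class Name    com.mysql.jdbc.Driver
Database Driver Location(s)   /opt/drivers/mysql-connector-java-5.1.47.jar
Database User                 nifi
Password                      ********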
Apache NiFi reporting tasks are similar to controller services; they run in the background and send or log the statistics of a NiFi instance. NiFi reporting tasks can also be accessed from the same page as controller settings, but in a different tab.
To add a reporting task, a developer needs to click on the plus button present at the top right hand side of the reporting tasks page. These reporting tasks are mainly used for monitoring the activities of a NiFi instance, through either the bulletins or the provenance. Mainly, these reporting tasks use Site-to-Site to transport the NiFi statistics data to another node or an external system.
Let us now add a configured reporting task for more understanding.
This reporting task is used to generate bulletins when a memory pool crosses a specified percentage. Follow these steps to configure the MonitorMemory reporting task −
Click the plus sign and search for MonitorMemory in the list.
Select MonitorMemory and click on ADD.
Once it is added to the main page of reporting tasks, click on the configure icon.
In the properties tab, select the memory pool, which you want to monitor.
Select the percentage after which you want bulletins to alert the users.
Start the reporting task.
Apache NiFi is an open source platform and gives developers the option to add their own custom processors to the NiFi library. Follow these steps to create a custom processor.
Download the latest version of Maven from the link given below.
https://maven.apache.org/download.cgi
Add an environment variable named M2_HOME and set its value to the installation directory of Maven.
Download the Eclipse IDE from the below link.
https://www.eclipse.org/downloads/download.php
Open the command prompt and execute the Maven archetype command.

> mvn archetype:generate

Search for the nifi type in the archetype projects.
Select the org.apache.nifi:nifi-processor-bundle-archetype project.
Then from the list of versions, select the latest version, i.e. 1.7.1 for this tutorial.
Enter the groupId, artifactId, version, package, and artifactBaseName, etc.
A Maven project will then be created with two directories (where <artifactBaseName> is the value you entered) −
nifi-<artifactBaseName>-processors
nifi-<artifactBaseName>-nar
Run the below command in the nifi-<artifactBaseName>-processors directory to add the project to Eclipse.

mvn install eclipse:eclipse

Open Eclipse and select Import from the File menu.
Then select "Existing Projects into Workspace" and add the project from the nifi-<artifactBaseName>-processors directory in Eclipse.
Add your code in the public void onTrigger(ProcessContext context, ProcessSession session) method, which runs whenever the processor is scheduled to run (see the sketch after these steps).
Then package the code into a NAR file by running the below mentioned command.

mvn clean install

A NAR file will be created in the nifi-<artifactBaseName>-nar/target directory.
Copy the NAR file to the lib folder of Apache NiFi and restart NiFi.
After a successful restart of NiFi, check the processor list for the new custom processor.
For any errors, check the ./logs/nifi-app.log file.
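For orientation, a minimal processor class might look like the sketch below. The class name is hypothetical, and the @Tags/@CapabilityDescription annotations and property-descriptor boilerplate that the archetype generates are omitted for brevity.

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

public class MyProcessor extends AbstractProcessor {

    // the relationship to which successfully processed flowfiles are routed
    public static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .description("Successfully processed flowfiles")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(ProcessContext context, ProcessSession session)
            throws ProcessException {
        FlowFile flowFile = session.get();   // take one flowfile from the incoming queue
        if (flowFile == null) {
            return;                          // nothing queued on this run
        }
        // example work: tag the flowfile with a custom attribute
        flowFile = session.putAttribute(flowFile, "processed.by", "MyProcessor");
        session.transfer(flowFile, REL_SUCCESS);
    }
}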
Apache NiFi is an open source platform and gives developers the option to add their own custom controller services to Apache NiFi. The steps and tools are almost the same as those used to create a custom processor.
Open the command prompt and execute the Maven archetype command.

> mvn archetype:generate

Search for the nifi type in the archetype projects.
Select the org.apache.nifi:nifi-service-bundle-archetype project.
Then from the list of versions, select the latest version, i.e. 1.7.1 for this tutorial.
Enter the groupId, artifactId, version, package, and artifactBaseName, etc.
A Maven project will be created with the following directories (where <artifactBaseName> is the value you entered) −
nifi-<artifactBaseName>
nifi-<artifactBaseName>-nar
nifi-<artifactBaseName>-api
nifi-<artifactBaseName>-api-nar
Run the below command in the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories to add these two projects to Eclipse.

mvn install eclipse:eclipse

Open Eclipse and select Import from the File menu.
Then select "Existing Projects into Workspace" and add the projects from the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories in Eclipse.
Add your code in the source files.
Then package the code into NAR files by running the below mentioned command.

mvn clean install

Two NAR files will be created, one in each of the nifi-<artifactBaseName>/target and nifi-<artifactBaseName>-api/target directories.
Copy these NAR files to the lib folder of Apache NiFi and restart NiFi.
After a successful restart of NiFi, check the controller services list for the new custom controller service.
For any errors, check the ./logs/nifi-app.log file.
Apache NiFi uses the logback library to handle its logging. There is a file logback.xml present in the conf directory of NiFi, which is used to configure the logging in NiFi. The logs are generated in the logs folder of NiFi, and the log files are as described below.
This is the main log file of NiFi, which logs all the activities of the Apache NiFi application, ranging from NAR file loading to the run-time errors or bulletins encountered by NiFi components. Below is the default appender in the logback.xml file for the nifi-app.log file.

<appender name="APP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app_%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
        <maxFileSize>100MB</maxFileSize>
        <maxHistory>30</maxHistory>
    </rollingPolicy>
    <immediateFlush>true</immediateFlush>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The appender name is APP_FILE, and the class is RollingFileAppender, which means the logger uses a rolling policy. By default, the max file size is 100 MB and can be changed to the required size.
The maximum retention for APP_FILE is 30 log files and can be changed as per the user's requirement.
This log contains the user events like web security, web API config, user authorization, etc. Below is the appender for nifi-user.log in the logback.xml file.

<appender name="USER_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user_%d.log</fileNamePattern>
        <maxHistory>30</maxHistory>
    </rollingPolicy>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The appender name is USER_FILE. It follows a time-based rollover policy, and the maximum retention for USER_FILE is 30 log files. Below are the default loggers that route events to the USER_FILE appender, as shipped in a stock logback.xml.

<logger name="org.apache.nifi.web.security" level="INFO" additivity="false">
    <appender-ref ref="USER_FILE"/>
</logger>
<logger name="org.apache.nifi.web.api.config" level="INFO" additivity="false">
    <appender-ref ref="USER_FILE"/>
</logger>
<logger name="org.apache.nifi.authorization" level="INFO" additivity="false">
    <appender-ref ref="USER_FILE"/>
</logger>
<logger name="org.apache.nifi.cluster.authorization" level="INFO" additivity="false">
    <appender-ref ref="USER_FILE"/>
</logger>
<logger name="org.apache.nifi.web.filter.RequestLogger" level="INFO" additivity="false">
    <appender-ref ref="USER_FILE"/>
</logger>

This log contains the bootstrap logs, Apache NiFi's standard output (everything written to System.out in the code, mainly for debugging), and its standard error (everything written to System.err in the code). Below is the default appender for nifi-bootstrap.log in logback.xml.

<appender name="BOOTSTRAP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap_%d.log</fileNamePattern>
        <maxHistory>5</maxHistory>
    </rollingPolicy>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The nifi-bootstrap.log file's appender name is BOOTSTRAP_FILE, which also follows a rolling policy. The maximum retention for the BOOTSTRAP_FILE appender is 5 log files. The default loggers for the nifi-bootstrap.log file route the bootstrap packages, along with NiFi's standard output and standard error loggers, to this appender.
This helps users with guaranteed delivery, low latency, high throughput, dynamic prioritization, back pressure and modify flows on runtime."},{"code":null,"e":3845,"s":3740,"text":"It also provides data provenance module to track and monitor data from the start to the end of the flow."},{"code":null,"e":3950,"s":3845,"text":"It also provides data provenance module to track and monitor data from the start to the end of the flow."},{"code":null,"e":4046,"s":3950,"text":"Developers can create their own custom processors and reporting tasks according to their needs."},{"code":null,"e":4142,"s":4046,"text":"Developers can create their own custom processors and reporting tasks according to their needs."},{"code":null,"e":4233,"s":4142,"text":"NiFi also provides support to secure protocols like SSL, HTTPS, SSH and other encryptions."},{"code":null,"e":4324,"s":4233,"text":"NiFi also provides support to secure protocols like SSL, HTTPS, SSH and other encryptions."},{"code":null,"e":4422,"s":4324,"text":"It also supports user and role management and also can be configured with LDAP for authorization."},{"code":null,"e":4520,"s":4422,"text":"It also supports user and role management and also can be configured with LDAP for authorization."},{"code":null,"e":4569,"s":4520,"text":"The key concepts of Apache NiFi are as follows −"},{"code":null,"e":4681,"s":4569,"text":"Process Group − It is a group of NiFi flows, which helps a userto manage and keep flows in hierarchical manner."},{"code":null,"e":4793,"s":4681,"text":"Process Group − It is a group of NiFi flows, which helps a userto manage and keep flows in hierarchical manner."},{"code":null,"e":4955,"s":4793,"text":"Flow − It is created connecting different processors to transfer and modify data if required from one data source or sources to another destination data sources."},{"code":null,"e":5117,"s":4955,"text":"Flow − It is created connecting different processors to transfer and modify data if required from one data source or sources to another destination data sources."},{"code":null,"e":5334,"s":5117,"text":"Processor − A processor is a java module responsible for either fetching data from sourcing system or storing it in destination system. Other processors are also used to add attributes or change content in flowfile."},{"code":null,"e":5551,"s":5334,"text":"Processor − A processor is a java module responsible for either fetching data from sourcing system or storing it in destination system. Other processors are also used to add attributes or change content in flowfile."},{"code":null,"e":5887,"s":5551,"text":"Flowfile − It is the basic usage of NiFi, which represents the single object of the data picked from source system in NiFi. NiFiprocessormakes changes to flowfile while it moves from the source processor to the destination. Different events like CREATE, CLONE, RECEIVE, etc. are performed on flowfile by different processors in a flow."},{"code":null,"e":6223,"s":5887,"text":"Flowfile − It is the basic usage of NiFi, which represents the single object of the data picked from source system in NiFi. NiFiprocessormakes changes to flowfile while it moves from the source processor to the destination. Different events like CREATE, CLONE, RECEIVE, etc. are performed on flowfile by different processors in a flow."},{"code":null,"e":6354,"s":6223,"text":"Event − Events represent the change in flowfile while traversing through a NiFi Flow. 
These events are tracked in data provenance."},{"code":null,"e":6485,"s":6354,"text":"Event − Events represent the change in flowfile while traversing through a NiFi Flow. These events are tracked in data provenance."},{"code":null,"e":6693,"s":6485,"text":"Data provenance − It is a repository.It also has a UI, which enables users to check the information about a flowfile and helps in troubleshooting if any issues that arise during the processing of a flowfile."},{"code":null,"e":6901,"s":6693,"text":"Data provenance − It is a repository.It also has a UI, which enables users to check the information about a flowfile and helps in troubleshooting if any issues that arise during the processing of a flowfile."},{"code":null,"e":6999,"s":6901,"text":"Apache NiFi enables data fetching from remote machines by using SFTP and guarantees data lineage."},{"code":null,"e":7097,"s":6999,"text":"Apache NiFi enables data fetching from remote machines by using SFTP and guarantees data lineage."},{"code":null,"e":7256,"s":7097,"text":"Apache NiFi supports clustering, so it can work on multiple nodes with same flow processing different data, which increase the performance of data processing."},{"code":null,"e":7415,"s":7256,"text":"Apache NiFi supports clustering, so it can work on multiple nodes with same flow processing different data, which increase the performance of data processing."},{"code":null,"e":7508,"s":7415,"text":"It also provides security policies on user level, process group level and other modules too."},{"code":null,"e":7601,"s":7508,"text":"It also provides security policies on user level, process group level and other modules too."},{"code":null,"e":7686,"s":7601,"text":"Its UI can also run on HTTPS, which makes the interaction of users with NiFi secure."},{"code":null,"e":7771,"s":7686,"text":"Its UI can also run on HTTPS, which makes the interaction of users with NiFi secure."},{"code":null,"e":7892,"s":7771,"text":"NiFi supports around 188 processors and a user can also create custom plugins to support a wide variety of data systems."},{"code":null,"e":8013,"s":7892,"text":"NiFi supports around 188 processors and a user can also create custom plugins to support a wide variety of data systems."},{"code":null,"e":8239,"s":8013,"text":"When node gets disconnected from NiFi cluster while a user is making any changes in it, then the flow.xml becomes invalid.Anode cannot connect back to the cluster unless admin manually copies flow.xml from the connected node."},{"code":null,"e":8465,"s":8239,"text":"When node gets disconnected from NiFi cluster while a user is making any changes in it, then the flow.xml becomes invalid.Anode cannot connect back to the cluster unless admin manually copies flow.xml from the connected node."},{"code":null,"e":8617,"s":8465,"text":"Apache NiFi have state persistence issue in case of primary node switch, which sometimes makes processors not able to fetch data from sourcing systems."},{"code":null,"e":8769,"s":8617,"text":"Apache NiFi have state persistence issue in case of primary node switch, which sometimes makes processors not able to fetch data from sourcing systems."},{"code":null,"e":8999,"s":8769,"text":"Apache NiFi consist of a web server, flow controller and a processor, which runs on Java Virtual Machine. 
It also has 3 repositories Flowfile Repository, Content Repository, and Provenance Repository as shown in the figure below."},{"code":null,"e":9319,"s":8999,"text":"This repository stores the current state and attributes of every flowfile that goes through the data flows of apache NiFi. The default location of this repository is in the root directory of apache NiFi. The location of this repository can be changed by changing the property named \"nifi.flowfile.repository.directory\"."},{"code":null,"e":9663,"s":9319,"text":"This repository contains all the content present in all the flowfiles of NiFi. Its default directory is also in the root directory of NiFi and it can be changed using \"org.apache.nifi.controller.repository.FileSystemRepository\" property. This directory uses large space in disk so it is advisable to have enough space in the installation disk."},{"code":null,"e":10179,"s":9663,"text":"The repository tracks and stores all the events of all the flowfiles that flow in NiFi. There are two provenance repositories - volatile provenance repository (in this repository all the provenance data get lost after restart) and persistent provenance repository. Its default directory is also in the root directory of NiFi and it can be changed using \"org.apache.nifi.provenance.PersistentProvenanceRepository\" and \"org.apache.nifi.provenance.VolatileProvenanceRepositor\" property for the respective repositories."},{"code":null,"e":10312,"s":10179,"text":"In this chapter, we will learn about the environment setup ofApache NiFi. The steps for installation of Apache NiFi are as follows −"},{"code":null,"e":10458,"s":10312,"text":"Step 1 − Install the current version of Java in your computer. Please set theJAVA_HOME in your machine. You can check the version as shown below:"},{"code":null,"e":10516,"s":10458,"text":"In Windows Operating System (OS) (using command prompt) −"},{"code":null,"e":10533,"s":10516,"text":"> java -version\n"},{"code":null,"e":10562,"s":10533,"text":"In UNIX OS (Using Terminal):"},{"code":null,"e":10581,"s":10562,"text":"$ echo $JAVA_HOME\n"},{"code":null,"e":10656,"s":10584,"text":"Step 2 − DownloadApache NiFi from https://nifi.apache.org/download.html"},{"code":null,"e":10689,"s":10656,"text":"For windows OSdownload ZIP file."},{"code":null,"e":10722,"s":10689,"text":"For windows OSdownload ZIP file."},{"code":null,"e":10752,"s":10722,"text":"For UNIX OSdownload TAR file."},{"code":null,"e":10782,"s":10752,"text":"For UNIX OSdownload TAR file."},{"code":null,"e":10864,"s":10782,"text":"For docker images,go to the following link https://hub.docker.com/r/apache/nifi/."},{"code":null,"e":10946,"s":10864,"text":"For docker images,go to the following link https://hub.docker.com/r/apache/nifi/."},{"code":null,"e":11044,"s":10946,"text":"Step 3 − The installation process for Apache NiFi is very easy. The process differs with the OS −"},{"code":null,"e":11113,"s":11044,"text":"Windows OS − Unzip the zip package and the Apache NiFi is installed."},{"code":null,"e":11182,"s":11113,"text":"Windows OS − Unzip the zip package and the Apache NiFi is installed."},{"code":null,"e":11256,"s":11182,"text":"UNIX OS − Extract tar file in any location and the Logstash is installed."},{"code":null,"e":11330,"s":11256,"text":"UNIX OS − Extract tar file in any location and the Logstash is installed."},{"code":null,"e":11363,"s":11330,"text":"$tar -xvf nifi-1.6.0-bin.tar.gz\n"},{"code":null,"e":11489,"s":11363,"text":"Step 4 − Open command prompt, go to the bin directory of NiFi. 
In this chapter, we will learn about the environment setup of Apache NiFi. The steps for the installation of Apache NiFi are as follows −

Step 1 − Install the current version of Java on your computer and set JAVA_HOME on your machine. You can check both as shown below.

In Windows Operating System (OS) (using command prompt) −

   > java -version

In UNIX OS (using terminal) −

   $ echo $JAVA_HOME

Step 2 − Download Apache NiFi from https://nifi.apache.org/download.html

For Windows OS, download the ZIP file.
For UNIX OS, download the TAR file.
For Docker images, go to the following link https://hub.docker.com/r/apache/nifi/.

Step 3 − The installation process for Apache NiFi is very easy, and it differs with the OS −

Windows OS − Unzip the ZIP package and Apache NiFi is installed.
UNIX OS − Extract the TAR file in any location and Apache NiFi is installed.

   $ tar -xvf nifi-1.7.1-bin.tar.gz

Step 4 − Open the command prompt, go to the bin directory of NiFi, for example C:\nifi-1.7.1\bin, and execute the run-nifi.bat file.

   C:\nifi-1.7.1\bin>run-nifi.bat

Step 5 − It will take a few minutes for the NiFi UI to come up. A user can check nifi-app.log; once the NiFi UI is up, the user can enter http://localhost:8080/nifi/ to access it.
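Step 4 covers the Windows launcher. On UNIX-like systems the equivalent uses the nifi.sh script; a minimal sketch, assuming the TAR file was extracted as shown in Step 3 −

   $ cd nifi-1.7.1
   $ bin/nifi.sh start      # start NiFi in the background
   $ bin/nifi.sh status     # confirm it is running
   $ tail -f logs/nifi-app.log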
Apache NiFi is a web-based platform that is accessed through its web UI. The NiFi UI is very interactive and provides a wide variety of information about NiFi. A user can access information about the following attributes −

Active Threads
Total queued data
Transmitting Remote Process Groups
Not Transmitting Remote Process Groups
Running Components
Stopped Components
Invalid Components
Disabled Components
Up to date Versioned Process Groups
Locally modified Versioned Process Groups
Stale Versioned Process Groups
Locally modified and Stale Versioned Process Groups
Sync failure Versioned Process Groups

The Apache NiFi UI has the following components −

Processor − A user can drag the processor icon onto the canvas and select the desired processor for the data flow.

Input port − This icon is dragged onto the canvas to add an input port to a data flow. An input port is used to get data from a processor that is not present in that process group. After dragging this icon, NiFi asks for the name of the input port and then adds it to the canvas.

Output port − This icon is dragged onto the canvas to add an output port to a data flow. An output port is used to transfer data to a processor that is not present in that process group. After dragging this icon, NiFi asks for the name of the output port and then adds it to the canvas.

Process group − A user uses this icon to add a process group to the NiFi canvas. After dragging this icon, NiFi asks for the name of the process group and then adds it to the canvas.

Remote process group − This icon is used to add a remote process group to the NiFi canvas.

Funnel − A funnel is used to transfer the output of a processor to multiple processors. A user can use this icon to add a funnel to a NiFi data flow.

Template − This icon is used to add a data flow template to the NiFi canvas. This helps to reuse data flows in the same or different NiFi instances. After dragging it, a user can select from the templates already added to NiFi.

Label − Labels are used to add text on the NiFi canvas about any component present in NiFi. They offer a range of colors a user can use to add aesthetic sense.

Apache NiFi processors are the basic blocks for creating a data flow. Every processor has a different functionality, which contributes to the creation of the output flowfile. The example dataflow discussed here fetches a file from one directory using the GetFile processor and stores it in another directory using the PutFile processor.

GetFile is used to fetch files of a specific format from a specific directory. It also provides other options for more control over fetching; we will discuss these in the properties section below.

Following are the different settings of the GetFile processor −

Name − In the Name setting, a user can define any name for the processor, for example according to the project, to make the name more meaningful.
Enabled − A user can enable or disable the processor using this setting.
Penalty Duration − This setting lets a user add the penalty time duration applied in the event of flowfile failure.
Yield Duration − This setting is used to specify the yield time for the processor; during this period, the processor is not scheduled again.
Bulletin Level − This setting is used to specify the log level of that processor.
Automatically Terminate Relationships − This has a check box for every available relationship of that particular processor. By checking a box, a user can program the processor to terminate the flowfile on that event and not send it further in the flow.

These are the scheduling options offered by the GetFile processor −

Scheduling Strategy − You can schedule the processor on a time basis by selecting Timer driven, or by a specified CRON string by selecting the CRON driven option.
Concurrent Tasks − This option is used to define the concurrent task schedule for this processor.
Execution − A user can define whether to run the processor on all nodes or only on the primary node by using this option.
Run Schedule − It is used to define the interval for the Timer driven strategy or the CRON expression for the CRON driven strategy.

GetFile offers multiple properties, ranging from compulsory properties like Input Directory and File Filter to optional properties like Path Filter and Maximum File Size. A user can manage the file-fetching process using these properties.
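As an illustration, a GetFile processor pointed at the input directory used later in this tutorial might be configured as follows. The property names are GetFile's own; the values are examples, not defaults −

   Input Directory        : c:\inputdir
   File Filter            : [^\.].*
   Keep Source File       : false
   Recurse Subdirectories : true
   Minimum File Age       : 0 sec
   Maximum File Size      : 10 MB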
Comments − This section is used to specify any information about the processor.

The PutFile processor is used to store a file from the data flow to a specific location.

The PutFile processor has the same settings as described above for GetFile − Name, Enabled, Penalty Duration, Yield Duration, Bulletin Level and Automatically Terminate Relationships − and the same scheduling options. In addition, its Scheduling Strategy list offers an experimental Event driven strategy, which triggers the processor on a specific event.

PutFile provides properties like Directory, to specify the output directory for the purpose of file transfer, and others to manage the transfer itself.

Comments − This section is used to specify any information about the processor.

In this chapter, we will discuss process categorization in Apache NiFi.

Data Ingestion − The processors under the Data Ingestion category are used to ingest data into the NiFi data flow. These are mainly the starting points of any data flow in Apache NiFi. Some of the processors that belong to this category are GetFile, GetHTTP, GetFTP, GetKAFKA, etc.
Routing and Mediation − Routing and Mediation processors are used to route flowfiles to different processors or data flows according to the information in the attributes or content of those flowfiles. These processors also control the NiFi data flows. Some of the processors that belong to this category are RouteOnAttribute, RouteOnContent, ControlRate, RouteText, etc.

Database Access − The processors of the Database Access category are capable of selecting or inserting data, or executing and preparing other SQL statements against a database. These processors mainly use the database connection pool controller setting of Apache NiFi. Some of the processors that belong to this category are ExecuteSQL, PutSQL, PutDatabaseRecord, ListDatabaseTables, etc.

Attribute Extraction − Attribute Extraction processors are responsible for extracting, analyzing and changing flowfile attributes during processing in the NiFi data flow. Some of the processors that belong to this category are UpdateAttribute, EvaluateJSONPath, ExtractText, AttributesToJSON, etc.

System Interaction − System Interaction processors are used to run processes or commands in any operating system. These processors can also run scripts in many languages to interact with a variety of systems. Some of the processors that belong to this category are ExecuteScript, ExecuteProcess, ExecuteGroovyScript, ExecuteStreamCommand, etc.

Data Transformation − Processors that belong to Data Transformation are capable of altering the content of flowfiles. They can be used to fully replace the data of a flowfile, normally when a user has to send a flowfile as an HTTP body to the InvokeHTTP processor. Some of the processors that belong to this category are ReplaceText, JoltTransformJSON, etc.

Sending Data − Sending Data processors are generally the end processors in a data flow. These processors are responsible for storing or sending data to the destination server. After successfully storing or sending the data, these processors DROP the flowfile with a success relationship. Some of the processors that belong to this category are PutEmail, PutKafka, PutSFTP, PutFile, PutFTP, etc.

Splitting and Aggregation − These processors are used to split and merge the content present in a flowfile. Some of the processors that belong to this category are SplitText, SplitJson, SplitXml, MergeContent, SplitContent, etc.

HTTP − These processors deal with HTTP and HTTPS calls. Some of the processors that belong to this category are InvokeHTTP, PostHTTP, ListenHTTP, etc.

AWS − AWS processors are responsible for interacting with the Amazon Web Services system. Some of the processors that belong to this category are GetSQS, PutSNS, PutS3Object, FetchS3Object, etc.

In an Apache NiFi data flow, flowfiles move from one processor to another through connections, which are validated using relationships between processors. Whenever a connection is created, a developer selects one or more relationships between those processors. The relationship check boxes in the processor's settings control what happens next.
If a developer selects these check boxes, the flowfile will terminate in that particular processor when the relationship is success or failure or both.

Success − When a processor successfully processes a flowfile, such as storing data in or fetching data from a data source without any connection, authentication or other error, the flowfile goes to the success relationship.

Failure − When a processor is not able to process a flowfile without errors, such as an authentication error or a connection problem, the flowfile goes to the failure relationship.

A developer can also transfer flowfiles to other processors using connections, and can load-balance them; however, load balancing was only released in version 1.8 and is not covered in this tutorial.

In such a flow, a connection with the failure relationship routes all flowfiles with errors to one processor, while all flowfiles without errors are transferred through the success connection.

Let us now proceed with the other relationships −

comms.failure − This relationship is met when a flowfile could not be fetched from the remote server due to a communications failure.

not.found − Any flowfile for which we receive a ‘Not Found’ message from the remote server will move to the not.found relationship.

permission.denied − When NiFi is unable to fetch a flowfile from the remote server due to insufficient permissions, it will move through this relationship.

A flowfile is the basic processing entity in Apache NiFi. It contains data content and attributes, which are used by NiFi processors to process data. The file content normally contains the data fetched from source systems. The most common attributes of an Apache NiFi flowfile are −

uuid − This stands for Universally Unique Identifier, which is a unique identity of a flowfile generated by NiFi.
filename − This attribute contains the file name of that flowfile and should not contain any directory structure.
fileSize − It contains the size of an Apache NiFi flowfile.
mime.type − It specifies the MIME type of this flowfile.
path − This attribute contains the relative path of the file to which a flowfile belongs and does not contain the file name.
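Any processor property that supports the NiFi Expression Language can reference these attributes. A few examples, using only the standard attribute names listed above −

   ${filename}                from the flowfile's filename attribute
   ${path}/${filename}        relative path plus file name
   ${uuid}                    the flowfile's unique identifier
   ${fileSize:gt(1048576)}    true when the flowfile is larger than 1 MB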
The Apache NiFi data flow connection has a queuing system to handle large amounts of inflowing data. These queues can hold a very large number of flowfiles to let the processor process them serially.

A queue may, for example, hold a flowfile transferred through the success relationship. A user can check the flowfile by selecting the List queue option in the connection's drop-down menu. In case of any overload or error, a user can also clear the queue by selecting the Empty queue option; the user can then restart the flow to get those files into the data flow again.

The list of flowfiles in a queue consists of position, UUID, Filename, File size, Queue Duration and Lineage Duration. A user can see all the attributes and content of a flowfile by clicking the info icon present in the first column of the flowfile list.

In Apache NiFi, a user can maintain different data flows in different process groups. These groups can be based on the different projects or organizations that the Apache NiFi instance supports. The fourth symbol in the menu at the top of the NiFi UI is used to add a process group to the NiFi canvas. For example, a process group named “Tutorialspoint.com_ProcessGroup” may contain a data flow with four processors currently in the stopped stage. Process groups can be created in a hierarchical manner to manage the data flows in a better structure, which is easier to understand. In the footer of the NiFi UI, you can see the process groups and go back to the top of the process group a user is currently in.

To see the full list of process groups present in NiFi, a user can go to the summary using the menu present at the top left side of the NiFi UI. In the summary, there is a Process Groups tab where all the process groups are listed with parameters like Version State, Transferred/Size, In/Size, Read/Write, Out/Size, etc.

Apache NiFi offers labels to enable a developer to write information about the components present on the NiFi canvas. The leftmost icon in the top menu of the NiFi UI is used to add a label to the canvas. A developer can change the color of the label and the size of its text with a right-click on the label, choosing the appropriate option from the menu.

Apache NiFi is a highly configurable platform. The nifi.properties file in the conf directory contains most of the configuration.

The commonly used properties of Apache NiFi are as follows −

Core properties − This section contains the properties that are compulsory to run a NiFi instance.
State management − These properties store the state of components, which helps processing resume where the components left off after a restart or in the next scheduled run.
FlowFile repository − This section holds the important details of the FlowFile repository.

Apache NiFi offers support for multiple tools like Ambari and ZooKeeper for administration purposes. NiFi also provides configuration in the nifi.properties file to set up HTTPS and other things for administrators.

NiFi itself does not handle the voting process in a cluster. This means that when a cluster is created, all the nodes are primary and coordinator; so ZooKeeper is configured to manage the voting for the primary node and coordinator.
The nifi.properties file contains some properties to set up ZooKeeper.

To use NiFi over HTTPS, administrators have to generate a keystore and truststore and set some properties in the nifi.properties file. The TLS toolkit can be used to generate all the necessary keys to enable HTTPS in Apache NiFi. There are also some other properties, which are used by administrators to manage NiFi and for its service continuity.
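As a sketch, the cluster-, ZooKeeper- and HTTPS-related entries in conf/nifi.properties look like this. The property keys are NiFi's own; the host names, ports and paths below are illustrative assumptions −

   nifi.cluster.is.node=true
   nifi.cluster.node.address=node1.example.com
   nifi.cluster.node.protocol.port=11443
   nifi.zookeeper.connect.string=zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181
   nifi.web.https.host=node1.example.com
   nifi.web.https.port=9443
   nifi.security.keystore=./conf/keystore.jks
   nifi.security.truststore=./conf/truststore.jks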
Apache NiFi offers a large number of components to help developers create data flows for any type of protocol or data source. To create a flow, a developer drags components from the menu bar onto the canvas and connects them by clicking and dragging the mouse from one component to the other.

Generally, a NiFi flow has a listener component at the start, like GetFile, which gets the data from the source system. At the other end there is a transmitter component, like PutFile, and there are components in between that process the data.

For example, let us create a flow that takes an empty file from one directory, adds some text to that file and puts it in another directory −

To begin with, drag the processor icon onto the NiFi canvas and select the GetFile processor from the list.
Create an input directory like c:\inputdir.
Right-click the processor, select Configure, add Input Directory (c:\inputdir) in the Properties tab, click Apply and go back to the canvas.
Drag the processor icon onto the canvas and select the ReplaceText processor from the list.
Right-click the processor and select Configure. In the Properties tab, add some text like “Hello tutorialspoint.com” in the Replacement Value text box and click Apply.
Go to the Settings tab, check the failure check box on the right-hand side, and then go back to the canvas.
Connect the GetFile processor to ReplaceText on the success relationship.
Drag the processor icon onto the canvas and select the PutFile processor from the list.
Create an output directory like c:\outputdir.
Right-click the processor, select Configure, add Directory (c:\outputdir) in the Properties tab, click Apply and go back to the canvas.
Go to the Settings tab, check the failure and success check boxes on the right-hand side, and then go back to the canvas.
Connect the ReplaceText processor to PutFile on the success relationship.
Now start the flow and add an empty file to the input directory; you will see that it moves to the output directory and the text is added to the file.

By following the above steps, developers can choose any processor and other NiFi components to create a flow suitable for their organisation or client.
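To verify the flow end to end from the command prompt, drop an empty file into the input directory and, once the flow has run, print the file that appears in the output directory. The commands below are illustrative Windows syntax, matching the directories created above −

   C:\> type nul > c:\inputdir\test.txt
   C:\> type c:\outputdir\test.txt
   Hello tutorialspoint.com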
Apache NiFi offers the concept of templates, which makes it easier to reuse and distribute NiFi flows. The flows can then be used by other developers or in other NiFi clusters. It also helps NiFi developers share their work in repositories like GitHub.

Let us create a template for the flow we created in the chapter “Apache NiFi - Creating Flows”.

Select all the components of the flow using the Shift key, and then click the Create Template icon in the toolbox at the left-hand side of the NiFi canvas. Enter a name for the template; a developer can also add a description, which is optional.

Then go to the NiFi Templates option in the menu present at the top right-hand corner of the NiFi UI. Click the download icon (present at the right-hand side in the list) of the template you want to download; an XML file with the template name will be downloaded.

To use a template in NiFi, a developer has to upload its XML file to NiFi using the UI. There is an Upload Template icon beside the Create Template icon; click it and browse to the XML file.

In the top toolbar of the NiFi UI, the template icon is before the label icon. Drag the template icon onto the canvas, choose the template from the drop-down list and click Add. The template is then added to the NiFi canvas.

NiFi offers a large REST API, which helps developers make changes to NiFi and get information from NiFi in any other tool or custom-developed application. In this tutorial, we will use the Postman app in Google Chrome to run some examples. To add Postman to your Google Chrome, find it in the Chrome Web Store and click the Add to Chrome button; you will then see a new app added to your Google Chrome.

The current version of the NiFi REST API is 1.8.0, and the documentation is present at the URL below −

https://nifi.apache.org/docs/nifi-docs/rest-api/index.html

Following are the most used NiFi REST API base URLs −

   http://<nifi-host>:<port>/nifi-api/

In case HTTPS is enabled −

   https://<nifi-host>:<port>/nifi-api/

Let us now consider an example and run it in Postman to get details about the running NiFi instance −

   GET http://localhost:8080/nifi-api/flow/about

The response looks like this −

   {
      "about": {
         "title": "NiFi",
         "version": "1.7.1",
         "uri": "http://localhost:8080/nifi-api/",
         "contentViewerUrl": "../nifi-content-viewer/",
         "timezone": "SGT",
         "buildTag": "nifi-1.7.1-RC1",
         "buildTimestamp": "07/12/2018 12:54:43 SGT"
      }
   }
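The same call can be made from the command line with curl; an unsecured instance on localhost:8080 is assumed here −

   $ curl -s http://localhost:8080/nifi-api/flow/about
   $ curl -s http://localhost:8080/nifi-api/process-groups/root

The second request returns the root process group, whose id is needed by many of the other API endpoints.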
Apache NiFi logs and stores every piece of information about the events that occur on the ingested data in the flow. The data provenance repository stores this information and provides a UI to search this event information. Data provenance can be accessed at the full NiFi level and also at the processor level.

Each entry in the NiFi Data Provenance event list shows fields such as the event date/time, the event type, the flowfile UUID, the file size and the component name and type. To get more information about an event, a user can click the information icon present in the first column of the NiFi Data Provenance UI.

There are some properties in the nifi.properties file which are used to manage the NiFi Data Provenance repository.

In Apache NiFi, there are multiple ways to monitor the different statistics of the system, like errors, memory usage, CPU usage, data flow statistics, etc. We will discuss the most popular ones in this tutorial.

In this section, we will learn more about the built-in monitoring in Apache NiFi.

Bulletin board − The bulletin board shows the latest ERROR and WARNING bulletins generated by NiFi processors in real time. To access the bulletin board, a user has to go to the right-hand drop-down menu and select the Bulletin Board option. It refreshes automatically, and a user can also disable that. A user can navigate to the actual processor by double-clicking the error. A user can also filter the bulletins by the following −

by message
by name
by id
by group id

Data provenance − To monitor the events occurring on any specific processor or throughout NiFi, a user can access Data Provenance from the same menu as the bulletin board. A user can filter the events in the data provenance repository by the following fields −

by component name
by component type
by type

Summary − The Apache NiFi summary can also be accessed from the same menu as the bulletin board. This UI contains information about all the components of that particular NiFi instance or cluster. The components can be filtered by name, by type or by URI. There are different tabs for different component types. Following are the components which can be monitored in the NiFi summary UI −

Processors
Input ports
Output ports
Remote process groups
Connections
Process groups

In this UI, there is a link at the bottom right-hand side named system diagnostics to check the JVM statistics.

Apache NiFi provides multiple reporting tasks to support external monitoring systems like Ambari, Grafana, etc. A developer can create a custom reporting task or configure the built-in ones to send the metrics of NiFi to external monitoring systems.
NiFi 1.7.1 offers several such reporting tasks out of the box, for example AmbariReportingTask, MonitorDiskUsage and MonitorMemory; the last of these is configured as an example later in this tutorial.

There is an API named system diagnostics, which can be used to monitor the NiFi stats in any custom-developed application. Let us check the API in Postman −

   GET http://localhost:8080/nifi-api/system-diagnostics

The response looks like this −

   {
      "systemDiagnostics": {
         "aggregateSnapshot": {
            "totalNonHeap": "183.89 MB",
            "totalNonHeapBytes": 192819200,
            "usedNonHeap": "173.47 MB",
            "usedNonHeapBytes": 181894560,
            "freeNonHeap": "10.42 MB",
            "freeNonHeapBytes": 10924640,
            "maxNonHeap": "-1 bytes",
            "maxNonHeapBytes": -1,
            "totalHeap": "512 MB",
            "totalHeapBytes": 536870912,
            "usedHeap": "273.37 MB",
            "usedHeapBytes": 286652264,
            "freeHeap": "238.63 MB",
            "freeHeapBytes": 250218648,
            "maxHeap": "512 MB",
            "maxHeapBytes": 536870912,
            "heapUtilization": "53.0%",
            "availableProcessors": 4,
            "processorLoadAverage": -1,
            "totalThreads": 71,
            "daemonThreads": 31,
            "uptime": "17:30:35.277",
            "flowFileRepositoryStorageUsage": {
               "freeSpace": "286.93 GB",
               "totalSpace": "464.78 GB",
               "usedSpace": "177.85 GB",
               "freeSpaceBytes": 308090789888,
               "totalSpaceBytes": 499057160192,
               "usedSpaceBytes": 190966370304,
               "utilization": "38.0%"
            },
            "contentRepositoryStorageUsage": [
               {
                  "identifier": "default",
                  "freeSpace": "286.93 GB",
                  "totalSpace": "464.78 GB",
                  "usedSpace": "177.85 GB",
                  "freeSpaceBytes": 308090789888,
                  "totalSpaceBytes": 499057160192,
                  "usedSpaceBytes": 190966370304,
                  "utilization": "38.0%"
               }
            ],
            "provenanceRepositoryStorageUsage": [
               {
                  "identifier": "default",
                  "freeSpace": "286.93 GB",
                  "totalSpace": "464.78 GB",
                  "usedSpace": "177.85 GB",
                  "freeSpaceBytes": 308090789888,
                  "totalSpaceBytes": 499057160192,
                  "usedSpaceBytes": 190966370304,
                  "utilization": "38.0%"
               }
            ],
            "garbageCollection": [
               {
                  "name": "G1 Young Generation",
                  "collectionCount": 344,
                  "collectionTime": "00:00:06.239",
                  "collectionMillis": 6239
               },
               {
                  "name": "G1 Old Generation",
                  "collectionCount": 0,
                  "collectionTime": "00:00:00.000",
                  "collectionMillis": 0
               }
            ],
            "statsLastRefreshed": "09:30:20 SGT",
            "versionInfo": {
               "niFiVersion": "1.7.1",
               "javaVendor": "Oracle Corporation",
               "javaVersion": "1.8.0_151",
               "osName": "Windows 7",
               "osVersion": "6.1",
               "osArchitecture": "amd64",
               "buildTag": "nifi-1.7.1-RC1",
               "buildTimestamp": "07/12/2018 12:54:43 SGT"
            }
         }
      }
   }

Before starting an upgrade of Apache NiFi, read the release notes to learn about the changes and additions, and evaluate the impact of those additions and changes on your current NiFi installation. Below is the link to the release notes for new releases of Apache NiFi −

https://cwiki.apache.org/confluence/display/NIFI/Release+Notes

In a cluster setup, a user needs to upgrade the NiFi installation on every node of the cluster.
Follow the steps given below to upgrade Apache NiFi −

Back up all the custom NARs present in your current NiFi lib folder or any other folder.
Download the new version of Apache NiFi. Below is the link to download the source and binaries of the latest NiFi version − https://nifi.apache.org/download.html
Create a new directory in the same installation directory as the current NiFi and extract the new version of Apache NiFi there.
Stop NiFi gracefully − first stop all the processors and let all the flowfiles present in the flow get processed; once no more flowfiles remain, stop NiFi.
Copy the configuration of authorizers.xml from the current NiFi installation to the new version.
Update the values in bootstrap-notification-services.xml and bootstrap.conf of the new NiFi version from the current one.
Add the custom logging from logback.xml to the new NiFi installation.
Configure the login identity provider in login-identity-providers.xml from the current version.
Update all the properties in nifi.properties of the new NiFi installation from the current version.
Make sure that the group and user of the new version are the same as those of the current version, to avoid any permission-denied errors.
Copy the configuration from state-management.xml of the current version to the new version.
Copy the contents of the following directories from the current version of the NiFi installation to the same directories in the new version −

./conf/flow.xml.gz
flow.xml.gz from the archive directory as well.
For the provenance and content repositories, change the values in the nifi.properties file to point to the current repositories.
Copy the state from ./state/local, or change nifi.properties if any other external directory is specified.

Recheck all the changes performed, and check whether they have an impact on any new changes added in the new NiFi version. If there is any impact, look for solutions.
Start all the NiFi nodes and verify that all the flows are working correctly, that the repositories are storing data and that the UI is retrieving it without errors.
Monitor bulletins for some time to check for any new errors.
If the new version is working correctly, the current version can be archived and deleted from the directories.
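Taken together, the file-copy portion of the upgrade might look like the sketch below. The version numbers and installation paths are assumptions; adjust them to your own setup −

   # Illustrative only - adjust versions and paths to your installation
   cp /opt/nifi-1.6.0/conf/flow.xml.gz          /opt/nifi-1.7.1/conf/
   cp /opt/nifi-1.6.0/conf/authorizers.xml      /opt/nifi-1.7.1/conf/
   cp /opt/nifi-1.6.0/conf/state-management.xml /opt/nifi-1.7.1/conf/
   cp -r /opt/nifi-1.6.0/state/local            /opt/nifi-1.7.1/state/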
An Apache NiFi Remote Process Group, or RPG, enables a flow to direct the flowfiles in a flow to different NiFi instances using the Site-to-Site protocol. As of version 1.7.1, NiFi does not offer balanced relationships, so RPGs are used for load balancing in a NiFi data flow.

A developer can add an RPG from the top toolbar of the NiFi UI by dragging its icon onto the canvas. To configure an RPG, a developer has to add fields such as the URLs of the target NiFi instances, the transport protocol (RAW or HTTP) and the communication timeouts. A developer also needs to enable the RPG before using it, just as we start processors before using them.

Apache NiFi offers shared services that can be used by processors and reporting tasks; these are called controller settings. They are like a database connection pool, which can be shared by processors accessing the same database. To access the controller settings, use the drop-down menu at the top right corner of the NiFi UI.

There are many controller settings offered by Apache NiFi; we will discuss a commonly used one and how it is set up in NiFi.

Click the plus sign on the NiFi Settings page after clicking the Controller Settings option, then select DBCPConnectionPool from the list of controller settings. DBCPConnectionPool is added to the main NiFi Settings page with the following information about the controller setting −

Name
Type
Bundle
State
Scope
Configure and delete icons

Click the configure icon and fill in the required fields − chiefly the database connection URL, the database driver class name and location, and the database user and password.

To stop or configure a controller setting, all the attached NiFi components should first be stopped. NiFi also adds a scope to controller settings to manage their configuration; therefore, only the components that share the same setting are affected, and they all use the same controller setting.
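For example, a DBCPConnectionPool for a MySQL database might be filled in as follows. All values are illustrative, and the driver JAR path in particular is an assumption −

   Database Connection URL     : jdbc:mysql://localhost:3306/testdb
   Database Driver Class Name  : com.mysql.jdbc.Driver
   Database Driver Location(s) : /opt/drivers/mysql-connector-java-5.1.47.jar
   Database User               : nifi
   Password                    : ********
   Max Total Connections       : 8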
Apache NiFi reporting tasks are similar to controller services; they run in the background and send or log the statistics of the NiFi instance. NiFi reporting tasks can also be accessed from the same page as controller settings, but in a different tab.

To add a reporting task, a developer needs to click the plus button present at the top right-hand side of the Reporting Tasks page. These reporting tasks are mainly used for monitoring the activities of a NiFi instance, in either the bulletins or the provenance. Mainly, these reporting tasks use Site-to-Site to transport the NiFi statistics data to another node or an external system.

Let us now add a configured reporting task for more understanding.

MonitorMemory − This reporting task is used to generate bulletins when a memory pool crosses a specified percentage. Follow these steps to configure the MonitorMemory reporting task −

Click the plus sign and search for MonitorMemory in the list.
Select MonitorMemory and click ADD.
Once it is added to the main reporting tasks page, click the configure icon.
In the Properties tab, select the memory pool you want to monitor.
Select the percentage after which you want bulletins to alert users.
Start the reporting task.

Apache NiFi is an open-source platform and gives developers the option to add their own custom processors to the NiFi library. Follow these steps to create a custom processor −

Download the latest version of Maven from https://maven.apache.org/download.cgi
Add an environment variable named M2_HOME and set its value to the installation directory of Maven.
Download the Eclipse IDE from https://www.eclipse.org/downloads/
Open a command prompt and execute the Maven archetype command −

   > mvn archetype:generate

Search for the nifi type in the archetype projects.
Select the org.apache.nifi:nifi-processor-bundle-archetype project.
Then, from the list of versions, select the latest version − 1.7.1 for this tutorial.
Enter the groupId, artifactId, version, package, artifactBaseName, etc.
A Maven project will then be created, with the following directories −

nifi-<artifactBaseName>-processors
nifi-<artifactBaseName>-nar

Run the command below in the nifi-<artifactBaseName>-processors directory to add the project to Eclipse −

   mvn install eclipse:eclipse

Open Eclipse and select Import from the File menu.
Then select “Existing Projects into Workspace” and add the project from the nifi-<artifactBaseName>-processors directory in Eclipse.
Add your code in the public void onTrigger(ProcessContext context, ProcessSession session) method, which runs whenever the processor is scheduled to run (a minimal skeleton is sketched at the end of this list).
Then package the code into a NAR file by running the command below −

   mvn clean install

A NAR file will be created in the nifi-<artifactBaseName>-nar/target directory.
Copy the NAR file to the lib folder of Apache NiFi and restart NiFi.
After a successful restart of NiFi, check the processor list for the new custom processor.
For any errors, check the ./logs/nifi-app.log file.
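For orientation, here is a minimal onTrigger skeleton. This is a sketch only − the package and class names are hypothetical stand-ins for whatever the archetype generated for you, and the attribute it adds is just an example −

   // Hypothetical names: replace package and class with what the archetype generated
   package com.example.processors;

   import java.util.Collections;
   import java.util.Set;

   import org.apache.nifi.annotation.documentation.CapabilityDescription;
   import org.apache.nifi.annotation.documentation.Tags;
   import org.apache.nifi.flowfile.FlowFile;
   import org.apache.nifi.processor.AbstractProcessor;
   import org.apache.nifi.processor.ProcessContext;
   import org.apache.nifi.processor.ProcessSession;
   import org.apache.nifi.processor.Relationship;
   import org.apache.nifi.processor.exception.ProcessException;

   @Tags({"example"})
   @CapabilityDescription("Adds a greeting attribute to every flowfile.")
   public class MyProcessor extends AbstractProcessor {

       // The single relationship this processor routes flowfiles to
       public static final Relationship REL_SUCCESS = new Relationship.Builder()
               .name("success")
               .description("Flowfiles processed without error")
               .build();

       @Override
       public Set<Relationship> getRelationships() {
           return Collections.singleton(REL_SUCCESS);
       }

       @Override
       public void onTrigger(ProcessContext context, ProcessSession session)
               throws ProcessException {
           // Take the next flowfile from the incoming queue, if any
           FlowFile flowFile = session.get();
           if (flowFile == null) {
               return; // nothing queued at the moment
           }
           // Example work: add an attribute, then route to success
           flowFile = session.putAttribute(flowFile, "greeting", "Hello tutorialspoint.com");
           session.transfer(flowFile, REL_SUCCESS);
       }
   }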
Apache NiFi is an open-source platform and gives developers the option to add their own custom controller services to Apache NiFi. The steps and tools are almost the same as those used to create a custom processor −

Open a command prompt and execute the Maven archetype command −

   > mvn archetype:generate

Search for the nifi type in the archetype projects.
Select the org.apache.nifi:nifi-service-bundle-archetype project.
Then, from the list of versions, select the latest version − 1.7.1 for this tutorial.
Enter the groupId, artifactId, version, package, artifactBaseName, etc.
A Maven project will be created, with the following directories −

nifi-<artifactBaseName>
nifi-<artifactBaseName>-nar
nifi-<artifactBaseName>-api
nifi-<artifactBaseName>-api-nar

Run the command below in the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories to add these two projects to Eclipse −

   mvn install eclipse:eclipse

Open Eclipse and select Import from the File menu.
Then select “Existing Projects into Workspace” and add the projects from the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories in Eclipse.
Add your code in the source files.
Then package the code into NAR files by running the command below −

   mvn clean install
Two NAR files will be created, one in each of the nifi-<artifactBaseName>/target and nifi-<artifactBaseName>-api/target directories.
Copy these NAR files to the lib folder of Apache NiFi and restart NiFi.
After a successful restart of NiFi, check the controller services list for the new custom service.
For any errors, check the ./logs/nifi-app.log file.

Apache NiFi uses the logback library to handle its logging. There is a file logback.xml in the conf directory of NiFi, which is used to configure the logging. The logs are generated in the logs folder of NiFi, and the log files are described below.

nifi-app.log − This is the main log file of NiFi, which logs all the activities of the Apache NiFi application, ranging from NAR file loading to the runtime errors and bulletins encountered by NiFi components. Below is the default appender in the logback.xml file for nifi-app.log; the exact defaults may differ slightly between NiFi versions −

   <appender name="APP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
      <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app.log</file>
      <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
         <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app_%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
         <maxFileSize>100MB</maxFileSize>
         <maxHistory>30</maxHistory>
      </rollingPolicy>
      <immediateFlush>true</immediateFlush>
      <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
         <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
      </encoder>
   </appender>

The appender name is APP_FILE, and the class is RollingFileAppender, which means the logger uses a rolling policy. By default, the maximum file size is 100 MB and can be changed to the required size. The maximum retention for APP_FILE is 30 log files and can be changed as per the user's requirement.

nifi-user.log − This log contains user events like web security, web API configuration, user authorization, etc. Below is the appender for nifi-user.log in the logback.xml file −

   <appender name="USER_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
      <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user.log</file>
      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
         <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user_%d.log</fileNamePattern>
         <maxHistory>30</maxHistory>
      </rollingPolicy>
      <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
         <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
      </encoder>
   </appender>

The appender name is USER_FILE. It follows a rollover policy, and the maximum retention period for USER_FILE is 30 log files. Below are the default loggers attached to the USER_FILE appender in the stock configuration −

   <logger name="org.apache.nifi.web.security" level="INFO" additivity="false">
      <appender-ref ref="USER_FILE"/>
   </logger>
   <logger name="org.apache.nifi.web.api.config" level="INFO" additivity="false">
      <appender-ref ref="USER_FILE"/>
   </logger>
   <logger name="org.apache.nifi.authorization" level="INFO" additivity="false">
      <appender-ref ref="USER_FILE"/>
   </logger>
   <logger name="org.apache.nifi.cluster.authorization" level="INFO" additivity="false">
      <appender-ref ref="USER_FILE"/>
   </logger>
   <logger name="org.apache.nifi.web.filter.RequestLogger" level="INFO" additivity="false">
      <appender-ref ref="USER_FILE"/>
   </logger>

nifi-bootstrap.log − This log contains the bootstrap logs, Apache NiFi's standard output (everything written to System.out in the code, mainly for debugging) and standard error (everything written to System.err in the code). Below is the default appender for nifi-bootstrap.log in logback.xml −

   <appender name="BOOTSTRAP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
      <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap.log</file>
      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
         <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap_%d.log</fileNamePattern>
         <maxHistory>5</maxHistory>
      </rollingPolicy>
      <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
         <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
      </encoder>
   </appender>

The nifi-bootstrap.log file's appender name is BOOTSTRAP_FILE, which also follows a rolling policy. The maximum retention for the BOOTSTRAP_FILE appender is 5 log files.
Apache NiFi is a powerful, easy-to-use and reliable system to process and distribute data between disparate systems. It is based on the Niagara Files technology developed by the NSA, which was donated to the Apache Software Foundation after 8 years. It is distributed under the Apache License Version 2.0, January 2004. The latest version of Apache NiFi is 1.7.1.

Apache NiFi is a real-time data ingestion platform, which can transfer and manage data between different source and destination systems. It supports a wide variety of data formats like logs, geolocation data, social feeds, etc. It also supports many protocols like SFTP, HDFS, KAFKA, etc. This support for a wide variety of data sources and protocols makes this platform popular in many IT organizations.

The general features of Apache NiFi are as follows −

Apache NiFi provides a web-based user interface, which provides a seamless experience between design, control, feedback, and monitoring.
It is highly configurable. This helps users with guaranteed delivery, low latency, high throughput, dynamic prioritization, back pressure, and the ability to modify flows at runtime.

It also provides a data provenance module to track and monitor data from the start to the end of the flow.

Developers can create their own custom processors and reporting tasks according to their needs.

NiFi also provides support for secure protocols like SSL, HTTPS, SSH and other encryption mechanisms.

It also supports user and role management, and it can be configured with LDAP for authorization.

The key concepts of Apache NiFi are as follows −

Process Group − It is a group of NiFi flows, which helps a user to manage and keep flows in a hierarchical manner.

Flow − It is created by connecting different processors to transfer and modify data, if required, from one or more data sources to destination data sources.

Processor − A processor is a Java module responsible for either fetching data from a source system or storing it in a destination system. Other processors are also used to add attributes to, or change the content of, a flowfile.
Flowfile − It is the basic unit of work in NiFi; it represents a single object of data picked from a source system. A NiFi processor makes changes to a flowfile while it moves from the source processor to the destination. Different events like CREATE, CLONE, RECEIVE, etc. are performed on a flowfile by different processors in a flow.

Event − Events represent the changes in a flowfile while traversing through a NiFi flow. These events are tracked in data provenance.

Data provenance − It is a repository. It also has a UI, which enables users to check the information about a flowfile and helps in troubleshooting any issues that arise during the processing of a flowfile.

Apache NiFi offers the following advantages −

Apache NiFi enables data fetching from remote machines by using SFTP and guarantees data lineage.

Apache NiFi supports clustering, so it can work on multiple nodes with the same flow processing different data, which increases the performance of data processing.

It also provides security policies at the user level, the process group level and other module levels too.

Its UI can also run over HTTPS, which makes the interaction of users with NiFi secure.

NiFi supports around 188 processors, and a user can also create custom plugins to support a wide variety of data systems.
Apache NiFi has the following disadvantages −

When a node gets disconnected from the NiFi cluster while a user is making changes in it, the flow.xml becomes invalid. The node cannot connect back to the cluster unless the admin manually copies flow.xml from a connected node.

Apache NiFi has a state persistence issue in the case of a primary node switch, which sometimes leaves processors unable to fetch data from the source systems.

Apache NiFi consists of a web server, a flow controller and processors, which run on a Java Virtual Machine. It also has 3 repositories, the FlowFile Repository, the Content Repository, and the Provenance Repository, as shown in the figure below.

FlowFile Repository

This repository stores the current state and attributes of every flowfile that goes through the data flows of Apache NiFi. The default location of this repository is in the root directory of Apache NiFi. The location of this repository can be changed via the property named nifi.flowfile.repository.directory.

Content Repository

This repository contains all the content present in all the flowfiles of NiFi. Its default directory is also in the root directory of NiFi, and its implementation is the org.apache.nifi.controller.repository.FileSystemRepository class set in nifi.properties. This directory uses a lot of disk space, so it is advisable to have enough space on the installation disk.

Provenance Repository

This repository tracks and stores all the events of all the flowfiles that flow in NiFi. There are two provenance repositories − the volatile provenance repository (in this repository all the provenance data is lost after a restart) and the persistent provenance repository. The default directory is also in the root directory of NiFi, and the implementation can be switched between org.apache.nifi.provenance.PersistentProvenanceRepository and org.apache.nifi.provenance.VolatileProvenanceRepository in nifi.properties for the respective repositories.
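For reference, the entries in conf/nifi.properties that control these repositories look like the following. This is a sketch with default-style values; the exact paths depend on the installation.

# Repository settings in conf/nifi.properties (illustrative defaults)
nifi.flowfile.repository.directory=./flowfile_repository
nifi.content.repository.implementation=org.apache.nifi.controller.repository.FileSystemRepository
nifi.content.repository.directory.default=./content_repository
nifi.provenance.repository.implementation=org.apache.nifi.provenance.PersistentProvenanceRepository
nifi.provenance.repository.directory.default=./provenance_repository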
In this chapter, we will learn about the environment setup of Apache NiFi. The steps for the installation of Apache NiFi are as follows −

Step 1 − Install the current version of Java on your computer and set JAVA_HOME on your machine. You can check the version as shown below.

In Windows Operating System (OS), using the command prompt −

> java -version

In UNIX OS, using the terminal −

$ echo $JAVA_HOME

Step 2 − Download Apache NiFi from https://nifi.apache.org/download.html

For Windows OS, download the ZIP file.

For UNIX OS, download the TAR file.

For docker images, go to the following link: https://hub.docker.com/r/apache/nifi/.

Step 3 − The installation process for Apache NiFi is very easy, but it differs with the OS −

Windows OS − Unzip the ZIP package and Apache NiFi is installed.

UNIX OS − Extract the TAR file to any location and Apache NiFi is installed.

$ tar -xvf nifi-1.7.1-bin.tar.gz

Step 4 − Open the command prompt, go to the bin directory of NiFi, for example C:\nifi-1.7.1\bin, and execute the run-nifi.bat file.

C:\nifi-1.7.1\bin>run-nifi.bat

Step 5 − It will take a few minutes for the NiFi UI to come up. A user can check nifi-app.log; once the NiFi UI is up, a user can enter http://localhost:8080/nifi/ to access the UI.
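On UNIX-like systems, the equivalent of Step 4 is the nifi.sh script that ships in the bin directory. A minimal sketch, assuming NiFi was extracted to ~/nifi-1.7.1:

$ cd ~/nifi-1.7.1
$ ./bin/nifi.sh start          # start NiFi in the background
$ ./bin/nifi.sh status         # verify that the process is up
$ tail -f logs/nifi-app.log    # watch the startup progress until the UI is reachable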
Apache NiFi is a web-based platform that can be accessed by a user using its web UI. The NiFi UI is very interactive and provides a wide variety of information about NiFi. As shown in the image below, a user can access information about the following attributes −

Active Threads
Total queued data
Transmitting Remote Process Groups
Not Transmitting Remote Process Groups
Running Components
Stopped Components
Invalid Components
Disabled Components
Up to date Versioned Process Groups
Locally modified Versioned Process Groups
Stale Versioned Process Groups
Locally modified and Stale Versioned Process Groups
Sync failure Versioned Process Groups

Apache NiFi UI has the following components −

Processor − A user can drag the processor icon onto the canvas and select the desired processor for the data flow in NiFi.

Input Port − The below icon is dragged to the canvas to add an input port into a data flow. An input port is used to get data from a processor that is not present in that process group. After dragging this icon, NiFi asks for the name of the input port, and then it is added to the NiFi canvas.

Output Port − The below icon is dragged to the canvas to add an output port into a data flow. An output port is used to transfer data to a processor that is not present in that process group. After dragging this icon, NiFi asks for the name of the output port, and then it is added to the NiFi canvas.

Process Group − A user uses the below icon to add a process group to the NiFi canvas. After dragging this icon, NiFi asks for the name of the process group, and then it is added to the NiFi canvas.

Remote Process Group − This is used to add a remote process group to the NiFi canvas.
Funnel − A funnel is used to transfer the output of a processor to multiple processors. A user can use the below icon to add a funnel to a NiFi data flow.

Template − This icon is used to add a data flow template to the NiFi canvas. Templates help to reuse a data flow in the same or in different NiFi instances. After dragging, a user can select from the templates already added to NiFi.

Label − Labels are used to add text on the NiFi canvas about any component present in NiFi. They offer a range of colors a user can apply to add aesthetic sense.

Apache NiFi processors are the basic blocks for creating a data flow. Every processor has a different functionality, which contributes to the creation of the output flowfile. The data flow shown in the image below fetches a file from one directory using the GetFile processor and stores it in another directory using the PutFile processor.

GetFile

The GetFile processor is used to fetch files of a specific format from a specific directory. It also provides other options to the user for more control over fetching; we will discuss them in the Properties section below.

Following are the different settings of the GetFile processor −

Name − In the Name setting, a user can define any name for the processor, either according to the project or one that makes the name more meaningful.

Enabled − A user can enable or disable the processor using this setting.

Penalty Duration − This setting lets a user set the penalty time duration applied in the event of flowfile failure.

Yield Duration − This setting is used to specify the yield time for the processor. For this duration, the process is not scheduled again.

Bulletin Level − This setting is used to specify the log level of that processor.
Automatically Terminate Relationships − This setting has a checkbox for each of the available relationships of that particular processor. By checking a box, a user can program the processor to terminate the flowfile on that event and not send it further in the flow.

Following are the scheduling options offered by the GetFile processor −

Scheduling Strategy − You can either schedule the processor on a time basis by selecting the Timer driven option, or by a specified CRON string by selecting the CRON driven option.

Concurrent Tasks − This option is used to define the number of concurrent tasks for this processor.

Execution − A user can define whether to run the processor on all nodes or only on the primary node by using this option.

Run Schedule − It is used to define the period for the Timer driven strategy or the CRON expression for the CRON driven strategy. For example, the CRON expression 0 */5 * * * ? schedules the processor every five minutes.

Properties − GetFile offers multiple properties, as shown in the image below, ranging from compulsory properties like Input Directory and File Filter to optional properties like Path Filter and Maximum File Size. A user can manage the file fetching process using these properties.

Comments − This section is used to specify any information about the processor.
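As a concrete illustration, the property values below configure GetFile for the simple pickup flow used later in this tutorial. The values are illustrative; File Filter is shown with its stock default, which matches any file not starting with a dot.

Input Directory   : c:\inputdir
File Filter       : [^\.].*
Keep Source File  : false
Polling Interval  : 0 sec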
PutFile

The PutFile processor is used to store a file from the data flow to a specific location. The PutFile processor has the same settings as described above for GetFile − Name, Enabled, Penalty Duration, Yield Duration, Bulletin Level and Automatically Terminate Relationships. It also offers the same scheduling options, with one addition: there is also an experimental scheduling strategy, Event Driven, which triggers the processor on a specific event.

Properties − The PutFile processor provides properties like Directory, to specify the output directory for the purpose of file transfer, and others to manage the transfer, as shown in the image below.

Comments − This section is used to specify any information about the processor.

In this chapter, we will discuss process categorization in Apache NiFi.

Data Ingestion processors − The processors under the Data Ingestion category are used to ingest data into the NiFi data flow. These are mainly the starting point of any data flow in Apache NiFi. Some of the processors that belong to this category are GetFile, GetHTTP, GetFTP, GetKAFKA, etc.

Routing and Mediation processors − Routing and Mediation processors are used to route flowfiles to different processors or data flows according to the information in the attributes or the content of those flowfiles. These processors are also responsible for controlling the NiFi data flows. Some of the processors that belong to this category are RouteOnAttribute, RouteOnContent, ControlRate, RouteText, etc.

Database Access processors − The processors of the Database Access category are capable of selecting or inserting data, or executing and preparing other SQL statements against a database. These processors mainly use the data connection pool controller setting of Apache NiFi. Some of the processors that belong to this category are ExecuteSQL, PutSQL, PutDatabaseRecord, ListDatabaseTables, etc.

Attribute Extraction processors − Attribute Extraction processors are responsible for extracting, analyzing and changing flowfile attributes during processing in the NiFi data flow. Some of the processors that belong to this category are UpdateAttribute, EvaluateJSONPath, ExtractText, AttributesToJSON, etc.

System Interaction processors − System Interaction processors are used to run processes or commands in any operating system. These processors can also run scripts in many languages to interact with a variety of systems. Some of the processors that belong to this category are ExecuteScript, ExecuteProcess, ExecuteGroovyScript, ExecuteStreamCommand, etc.

Data Transformation processors − Processors that belong to Data Transformation are capable of altering the content of flowfiles. They can be used to fully replace the data of a flowfile, normally when a user has to send flowfile content as an HTTP body to the InvokeHTTP processor.
Some of the processors that belong to this category are ReplaceText, JoltTransformJSON, etc.

Sending Data processors − Sending Data processors are generally the end processors in a data flow. These processors are responsible for storing or sending data to the destination server. After successfully storing or sending the data, these processors DROP the flowfile with the success relationship. Some of the processors that belong to this category are PutEmail, PutKafka, PutSFTP, PutFile, PutFTP, etc.

Splitting and Aggregation processors − These processors are used to split and merge the content present in a flowfile. Some of the processors that belong to this category are SplitText, SplitJson, SplitXml, MergeContent, SplitContent, etc.

HTTP processors − These processors deal with HTTP and HTTPS calls. Some of the processors that belong to this category are InvokeHTTP, PostHTTP, ListenHTTP, etc.

AWS processors − AWS processors are responsible for interacting with the Amazon Web Services system. Some of the processors that belong to this category are GetSQS, PutSNS, PutS3Object, FetchS3Object, etc.

In an Apache NiFi data flow, flowfiles move from one processor to another through connections, which are validated using the relationships between processors. Whenever a connection is created, a developer selects one or more relationships between those processors.

As you can see in the above image, the checkboxes in the black rectangle are relationships. If a developer selects these checkboxes, then the flowfile will terminate in that particular processor when the relationship is success or failure or both.

success − When a processor successfully processes a flowfile, such as storing or fetching data from a data source without any connection, authentication or other error, the flowfile goes to the success relationship.

failure − When a processor is not able to process a flowfile without an error, such as an authentication error or a connection problem, the flowfile goes to the failure relationship.
A developer can also transfer flowfiles to other processors using connections. The developer can select the connection and also load balance it; however, load balancing was only just released in version 1.8 and will not be covered in this tutorial.

As you can see in the above image, the connection marked in red has the failure relationship, which means all flowfiles with errors will go to the processor on the left, and correspondingly, all the flowfiles without errors will be transferred to the connection marked in green.

Let us now proceed with the other relationships.

comms.failure − This relationship is met when a flowfile could not be fetched from the remote server due to a communication failure.

not.found − Any flowfile for which we receive a "Not Found" message from the remote server moves to the not.found relationship.

permission.denied − When NiFi is unable to fetch a flowfile from the remote server due to insufficient permissions, the flowfile moves through this relationship.

A flowfile is the basic processing entity in Apache NiFi. It contains data content and attributes, which are used by NiFi processors to process data. The file content normally contains the data fetched from the source systems. The most common attributes of an Apache NiFi flowfile are −

uuid − This stands for Universally Unique Identifier, which is a unique identity of a flowfile generated by NiFi.

filename − This attribute contains the filename of the flowfile; it should not contain any directory structure.

file size − It contains the size of an Apache NiFi flowfile.

mime.type − It specifies the MIME type of this flowfile.

path − This attribute contains the relative path of the file to which the flowfile belongs; it does not contain the file name.
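These attributes can be referenced from processor properties through the NiFi Expression Language. A small illustrative sketch, where the directory layout and the derived attribute name are hypothetical:

# In PutFile's Directory property, route files into per-type subfolders:
/output/${mime.type:substringAfter('/')}
# In an UpdateAttribute property named renamed.filename, derive a value from the filename:
${filename:toUpper()}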
The Apache NiFi data flow connection has a queuing system to handle large amounts of data inflow. These queues can hold very large numbers of flowfiles, letting the processors work through them serially.

The queue in the above image has 1 flowfile transferred through the success relationship. A user can check the flowfile by selecting the List queue option in the drop-down list. In case of any overload or error, a user can also clear the queue by selecting the Empty queue option; the user can then restart the flow to get those files into the data flow again.

The list of flowfiles in a queue consists of position, UUID, Filename, File size, Queue Duration, and Lineage Duration. A user can see all the attributes and the content of a flowfile by clicking the info icon present in the first column of the flowfile list.

In Apache NiFi, a user can maintain different data flows in different process groups. These groups can be based on the different projects or organizations which the Apache NiFi instance supports.

The fourth symbol in the menu at the top of the NiFi UI, as shown in the above picture, is used to add a process group to the NiFi canvas. The process group named "Tutorialspoint.com_ProcessGroup" contains a data flow with four processors, currently in the stopped state, as you can see in the above picture. Process groups can be created in a hierarchical manner to manage the data flows in a better structure, which is easy to understand.

In the footer of the NiFi UI, you can see the process groups and can go back to the top of the process group a user is currently present in.

To see the full list of process groups present in NiFi, a user can go to the summary by using the menu present at the top left side of the NiFi UI. In the summary, there is a Process Groups tab where all the process groups are listed, with parameters like Version State, Transferred/Size, In/Size, Read/Write, Out/Size, etc., as shown in the picture below.

Apache NiFi offers labels to enable a developer to write information about the components present on the NiFi canvas. The leftmost icon in the top menu of the NiFi UI is used to add a label to the NiFi canvas. A developer can change the color of the label and the size of the text with a right-click on the label and choosing the appropriate option from the menu.

Apache NiFi is a highly configurable platform. The nifi.properties file in the conf directory contains most of the configuration. The commonly used properties of Apache NiFi are as follows −

Core properties − This section contains the properties which are compulsory to run a NiFi instance.

State management − These properties are used to store the state of the components, which helps processing resume from where components left off after a restart and on the next scheduled run. The nifi.properties file also holds the important details of the FlowFile repository.
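A sketch of a few of these entries in conf/nifi.properties; the values shown are default-style and should be adapted to the environment.

# Core web properties
nifi.web.http.host=
nifi.web.http.port=8080
# State management
nifi.state.management.configuration.file=./conf/state-management.xml
nifi.state.management.provider.local=local-provider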
Apache NiFi offers support for multiple tools, like Ambari and ZooKeeper, for administration purposes. NiFi also provides configuration in the nifi.properties file to set up HTTPS and other things for administrators.

NiFi itself does not handle the voting process in a cluster. This means that when a cluster is created, all the nodes are primary and coordinator. So, ZooKeeper is configured to manage the voting for the primary node and the coordinator. The nifi.properties file contains some properties to set up ZooKeeper.

To use NiFi over HTTPS, administrators have to generate a keystore and a truststore and set some properties in the nifi.properties file. The TLS toolkit can be used to generate all the necessary keys to enable HTTPS in Apache NiFi.

There are some other properties, which are used by administrators to manage NiFi and for its service continuity.
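The TLS toolkit ships as part of the separate NiFi Toolkit download. A hedged sketch of running it in standalone mode; the hostname and DN below are illustrative:

$ ./bin/tls-toolkit.sh standalone -n 'nifi01.example.com' -C 'CN=admin,OU=NIFI'
# generates a certificate authority, a keystore/truststore pair for the host,
# and a client certificate for the given DN, ready to be referenced from nifi.properties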
Apache NiFi offers a large number of components to help developers create data flows for any type of protocol or data source. To create a flow, a developer drags the components from the menu bar to the canvas and connects them by clicking and dragging the mouse from one component to another.

Generally, a NiFi flow has a listener component at the start, like GetFile, which gets the data from the source system. At the other end, there is a transmitter component like PutFile, and in between there are components which process the data.

For example, let us create a flow which takes an empty file from one directory, adds some text to that file, and puts it in another directory.

To begin with, drag the processor icon to the NiFi canvas and select the GetFile processor from the list.

Create an input directory, such as c:\inputdir.

Right-click on the processor and select Configure; in the Properties tab, set Input Directory (c:\inputdir), click Apply, and go back to the canvas.

Drag the processor icon to the canvas and select the ReplaceText processor from the list.

Right-click on the processor and select Configure. In the Properties tab, add some text like "Hello tutorialspoint.com" in the textbox of Replacement Value and click Apply.

Go to the Settings tab, check the failure checkbox on the right-hand side, and then go back to the canvas.

Connect the GetFile processor to ReplaceText on the success relationship.

Drag the processor icon to the canvas and select the PutFile processor from the list.

Create an output directory, such as c:\outputdir.

Right-click on the processor and select Configure. In the Properties tab, set Directory (c:\outputdir), click Apply, and go back to the canvas.

Go to the Settings tab, check the failure and success checkboxes on the right-hand side, and then go back to the canvas.

Connect the ReplaceText processor to PutFile on the success relationship.

Now start the flow and add an empty file to the input directory; you will see that it moves to the output directory and the text is added to the file.

By following the above steps, developers can choose any processor and other NiFi components to create a suitable flow for their organisation or client.
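The finished flow can be exercised from the command line as well. A quick sketch, assuming a UNIX installation where /tmp/inputdir and /tmp/outputdir were configured instead of the Windows paths above:

$ touch /tmp/inputdir/test.txt               # drop an empty file into the watched directory
$ sleep 5; cat /tmp/outputdir/test.txt       # after the flow has run, the file carries the replacement text
Hello tutorialspoint.com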
Apache NiFi offers the concept of templates, which makes it easier to reuse and distribute NiFi flows. The flows can then be used by other developers or in other NiFi clusters. It also helps NiFi developers to share their work in repositories like GitHub.

Let us create a template for the flow which we created in the chapter "Apache NiFi − Creating Flows".

Select all the components of the flow using the shift key, and then click on the Create Template icon at the left-hand side of the NiFi canvas. You can also see a toolbox as shown in the above image. Click on the Create Template icon, marked in blue in the above picture. Enter a name for the template. A developer can also add a description, which is optional.

Then go to the NiFi Templates option in the menu present at the top right-hand corner of the NiFi UI, as shown in the picture below.

Now click the download icon (present at the right-hand side in the list) of the template you want to download. An XML file with the template name will be downloaded.

To use a template in NiFi, a developer will have to upload its XML file to NiFi using the UI. There is an Upload Template icon (marked in blue in the below image) beside the Create Template icon; click on that and browse to the XML.

In the top toolbar of the NiFi UI, the template icon is before the label icon. The icon is marked in blue, as shown in the picture below. Drag the template icon, choose the template from the drop-down list, and click Add. It will add the template to the NiFi canvas.
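Templates can also be fetched without the UI, over the REST API introduced in the next chapter. A sketch of downloading a template, where <template-id> is a placeholder for the template's identifier:

$ curl -s http://localhost:8080/nifi-api/templates/<template-id>/download -o mytemplate.xml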
NiFi offers a large number of APIs, which help developers to make changes to NiFi and get information about NiFi from any other tool or custom-developed application. In this tutorial, we will use the Postman app in Google Chrome to explain some examples.

To add Postman to your Google Chrome, go to the Chrome Web Store and click the Add to Chrome button. You will now see a new app added to your Google Chrome.

The current version of the NiFi REST API is 1.8.0, and the documentation is present at the below-mentioned URL.

https://nifi.apache.org/docs/nifi-docs/rest-api/index.html

All of the NiFi REST API modules are rooted at the following base URL −

http://<nifi host>:<nifi port>/nifi-api/

In case HTTPS is enabled −

https://<nifi host>:<nifi port>/nifi-api/

Let us now consider an example and run it in Postman to get the details about the running NiFi instance.

Request −

GET http://localhost:8080/nifi-api/flow/about

Response −

{
  "about": {
    "title": "NiFi",
    "version": "1.7.1",
    "uri": "http://localhost:8080/nifi-api/",
    "contentViewerUrl": "../nifi-content-viewer/",
    "timezone": "SGT",
    "buildTag": "nifi-1.7.1-RC1",
    "buildTimestamp": "07/12/2018 12:54:43 SGT"
  }
}
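The same call can be made from the command line with curl; piping through python -m json.tool is only for pretty-printing and is optional.

$ curl -s http://localhost:8080/nifi-api/flow/about | python -m json.tool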
Apache NiFi logs and stores every piece of information about the events that occur on the ingested data in the flow. The data provenance repository stores this information and provides a UI to search this event information. Data provenance can be accessed at the full NiFi level and also at the processor level.

The NiFi Data Provenance event list includes fields such as Date/Time, Type, FlowFile UUID, Size, Component Name, and Component Type. To get more information about an event, a user can click on the information icon present in the first column of the NiFi Data Provenance UI.

There are some properties in the nifi.properties file which are used to manage the NiFi Data Provenance repository.

In Apache NiFi, there are multiple ways to monitor the different statistics of the system, like errors, memory usage, CPU usage, data flow statistics, etc. We will discuss the most popular ones in this tutorial. In this section, we will learn more about the in-built monitoring in Apache NiFi.

Bulletin board − The bulletin board shows the latest ERROR and WARNING bulletins generated by NiFi processors in real time. To access the bulletin board, a user will have to go to the right-hand drop-down menu and select the Bulletin Board option. It refreshes automatically, and a user can disable it as well. A user can also navigate to the actual processor by double-clicking the error. A user can also filter the bulletins by the following −

by message
by name
by id
by group id

Data provenance − To monitor the events occurring on any specific processor or throughout NiFi, a user can access the data provenance from the same menu as the bulletin board. A user can also filter the events in the data provenance repository by the following fields −

by component name
by component type
by type

NiFi summary − The Apache NiFi summary can also be accessed from the same menu as the bulletin board. This UI contains information about all the components of that particular NiFi instance or cluster. They can be filtered by name, by type or by URI. There are different tabs for the different component types. Following are the components which can be monitored in the NiFi summary UI −

Processors
Input ports
Output ports
Remote process groups
Connections
Process groups

In this UI, there is a link at the bottom right-hand side, named system diagnostics, to check the JVM statistics.

Reporting tasks − Apache NiFi provides multiple reporting tasks to support external monitoring systems like Ambari, Grafana, etc. A developer can create a custom reporting task or can configure the in-built ones to send the metrics of NiFi to external monitoring systems. NiFi 1.7.1 offers reporting tasks such as AmbariReportingTask, MonitorDiskUsage, MonitorMemory and SiteToSiteStatusReportingTask.

There is an API named system diagnostics, which can be used to monitor the NiFi stats from any custom-developed application.
Let us check the API in Postman.

Request −

GET http://localhost:8080/nifi-api/system-diagnostics

Response −

{
  "systemDiagnostics": {
    "aggregateSnapshot": {
      "totalNonHeap": "183.89 MB",
      "totalNonHeapBytes": 192819200,
      "usedNonHeap": "173.47 MB",
      "usedNonHeapBytes": 181894560,
      "freeNonHeap": "10.42 MB",
      "freeNonHeapBytes": 10924640,
      "maxNonHeap": "-1 bytes",
      "maxNonHeapBytes": -1,
      "totalHeap": "512 MB",
      "totalHeapBytes": 536870912,
      "usedHeap": "273.37 MB",
      "usedHeapBytes": 286652264,
      "freeHeap": "238.63 MB",
      "freeHeapBytes": 250218648,
      "maxHeap": "512 MB",
      "maxHeapBytes": 536870912,
      "heapUtilization": "53.0%",
      "availableProcessors": 4,
      "processorLoadAverage": -1,
      "totalThreads": 71,
      "daemonThreads": 31,
      "uptime": "17:30:35.277",
      "flowFileRepositoryStorageUsage": {
        "freeSpace": "286.93 GB",
        "totalSpace": "464.78 GB",
        "usedSpace": "177.85 GB",
        "freeSpaceBytes": 308090789888,
        "totalSpaceBytes": 499057160192,
        "usedSpaceBytes": 190966370304,
        "utilization": "38.0%"
      },
      "contentRepositoryStorageUsage": [
        {
          "identifier": "default",
          "freeSpace": "286.93 GB",
          "totalSpace": "464.78 GB",
          "usedSpace": "177.85 GB",
          "freeSpaceBytes": 308090789888,
          "totalSpaceBytes": 499057160192,
          "usedSpaceBytes": 190966370304,
          "utilization": "38.0%"
        }
      ],
      "provenanceRepositoryStorageUsage": [
        {
          "identifier": "default",
          "freeSpace": "286.93 GB",
          "totalSpace": "464.78 GB",
          "usedSpace": "177.85 GB",
          "freeSpaceBytes": 308090789888,
          "totalSpaceBytes": 499057160192,
          "usedSpaceBytes": 190966370304,
          "utilization": "38.0%"
        }
      ],
      "garbageCollection": [
        {
          "name": "G1 Young Generation",
          "collectionCount": 344,
          "collectionTime": "00:00:06.239",
          "collectionMillis": 6239
        },
        {
          "name": "G1 Old Generation",
          "collectionCount": 0,
          "collectionTime": "00:00:00.000",
          "collectionMillis": 0
        }
      ],
      "statsLastRefreshed": "09:30:20 SGT",
      "versionInfo": {
        "niFiVersion": "1.7.1",
        "javaVendor": "Oracle Corporation",
        "javaVersion": "1.8.0_151",
        "osName": "Windows 7",
        "osVersion": "6.1",
        "osArchitecture": "amd64",
        "buildTag": "nifi-1.7.1-RC1",
        "buildTimestamp": "07/12/2018 12:54:43 SGT"
      }
    }
  }
}
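For scripted monitoring, individual values can be pulled out of this response on the command line. A sketch using curl and jq (jq is assumed to be installed):

$ curl -s http://localhost:8080/nifi-api/system-diagnostics \
    | jq -r '.systemDiagnostics.aggregateSnapshot.heapUtilization'
53.0%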
Before starting an upgrade of Apache NiFi, read the release notes to know about the changes and additions. A user needs to evaluate the impact of these additions and changes on his/her current NiFi installation. Below is the link to the release notes for the new releases of Apache NiFi.

https://cwiki.apache.org/confluence/display/NIFI/Release+Notes

In a cluster setup, a user needs to upgrade the NiFi installation of every node in the cluster. Follow the steps given below to upgrade Apache NiFi.

Back up all the custom NARs present in your current NiFi lib or any other folder.

Download the new version of Apache NiFi. Below is the link to download the source and binaries of the latest NiFi version.

https://nifi.apache.org/download.html

Create a new directory in the same installation directory as the current NiFi and extract the new version of Apache NiFi.

Stop NiFi gracefully − first stop all the processors and let all the flowfiles present in the flow get processed. Once no flowfiles remain, stop NiFi.
Copy the configuration in authorizers.xml from the current NiFi installation to the new version.

Update the values in bootstrap-notification-services.xml and bootstrap.conf of the new NiFi version from the current one.

Add any custom logging from logback.xml to the new NiFi installation.

Configure the login identity provider in login-identity-providers.xml from the current version.

Update all the properties in nifi.properties of the new NiFi installation from the current version.

Make sure that the group and user of the new version are the same as in the current version, to avoid any permission-denied errors.

Copy the configuration from state-management.xml of the current version to the new version.

Copy the contents of the following locations from the current NiFi installation to the same locations in the new version:

./conf/flow.xml.gz, and also flow.xml.gz from the archive directory.

For the provenance and content repositories, change the values in the nifi.properties file to point to the current repositories.

Copy the state from ./state/local, or change nifi.properties if any other external directory is specified.
Recheck all the changes performed and check whether they conflict with any of the changes added in the new NiFi version. If there is any impact, look for the solutions.

Start all the NiFi nodes and verify that all the flows are working correctly, that the repositories are storing data, and that the UI is retrieving it without any errors.

Monitor bulletins for some time to check for any new errors.

If the new version is working correctly, the current version can be archived and deleted from the directories.
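To make the backup and copy steps above concrete, here is a minimal shell sketch. The installation paths and the NAR file name are illustrative assumptions, not part of the original tutorial:

# Illustrative paths -- adjust to your actual installation directories
NIFI_OLD=/opt/nifi-1.6.0
NIFI_NEW=/opt/nifi-1.7.1

# Back up custom NARs before upgrading
mkdir -p /opt/nifi-backup
cp $NIFI_OLD/lib/my-custom-*.nar /opt/nifi-backup/

# Carry the flow and configuration over to the new installation
cp $NIFI_OLD/conf/flow.xml.gz          $NIFI_NEW/conf/
cp $NIFI_OLD/conf/authorizers.xml      $NIFI_NEW/conf/
cp $NIFI_OLD/conf/state-management.xml $NIFI_NEW/conf/
cp -r $NIFI_OLD/state/local            $NIFI_NEW/state/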
Apache NiFi Remote Process Group, or RPG, enables a flow to direct its FlowFiles to different NiFi instances using the Site-to-Site protocol. As of version 1.7.1, NiFi does not offer balanced relationships, so RPG is used for load balancing in a NiFi data flow.

A developer can add an RPG by dragging its icon from the top toolbar of the NiFi UI onto the canvas. To configure an RPG, a developer has to supply the connection details (such as the URLs) of the target NiFi instance. A developer also needs to enable it before using it, just as we start processors before using them.

Apache NiFi offers shared services that can be used by processors and reporting tasks; these are called controller settings. An example is a database connection pool, which can be shared by processors accessing the same database.

To access the controller settings, use the drop-down menu at the top right corner of the NiFi UI.

There are many controller settings offered by Apache NiFi; we will discuss a commonly used one and how to set it up in NiFi.

Click the plus sign on the NiFi Settings page after clicking the Controller Settings option, then select DBCPConnectionPool from the list of controller settings. DBCPConnectionPool will be added to the main NiFi Settings page, which shows the following information about the controller setting:

Name
Type
Bundle
State
Scope
Configure and delete icons

Click on the configure icon and fill in the required fields, listed below.
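The original table of fields did not survive in this copy. For reference, the main properties of the standard DBCPConnectionPool service include the following; treat this as a reconstruction from the service's documentation rather than the original table:

Database Connection URL: the JDBC URL of the target database
Database Driver Class Name: the JDBC driver class, for example com.mysql.jdbc.Driver
Database Driver Location(s): path(s) to the JDBC driver JAR file(s)
Database User / Password: the credentials used to connect
Max Wait Time / Max Total Connections: connection pool limits
Validation query: an optional query used to verify that connections are still alive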
To stop or reconfigure a controller setting, all the attached NiFi components must first be stopped. NiFi also attaches a scope to controller settings to manage their configuration, so that only the components which share the same controller setting are affected by changes to it.

Apache NiFi reporting tasks are similar to controller services: they run in the background and send or log the statistics of the NiFi instance. A NiFi reporting task can also be accessed from the same page as the controller settings, but in a different tab.

To add a reporting task, a developer needs to click on the plus button at the top right-hand side of the reporting tasks page. Reporting tasks are mainly used for monitoring the activities of a NiFi instance, through either the bulletins or the provenance, and they mainly use Site-to-Site to transport the NiFi statistics data to another node or an external system.

Let us now add and configure a reporting task for better understanding.

The MonitorMemory reporting task is used to generate bulletins when a memory pool crosses a specified percentage. Follow these steps to configure it:

Click the plus sign and search for MonitorMemory in the list.

Select MonitorMemory and click ADD.

Once it is added to the main page of reporting tasks, click on the configure icon.

In the properties tab, select the memory pool which you want to monitor.

Select the percentage after which you want bulletins to alert the users.

Start the reporting task.
Apache NiFi is an open source platform that gives developers the option to add their own custom processors to the NiFi library. Follow these steps to create a custom processor.

Download the latest version of Maven from the link given below.

https://maven.apache.org/download.cgi

Add an environment variable named M2_HOME and set its value to the installation directory of Maven.

Download the Eclipse IDE from the link below.

https://www.eclipse.org/downloads/download.php

Open a command prompt and execute the Maven archetype command.

> mvn archetype:generate

Search for the "nifi" type in the archetype projects.

Select the org.apache.nifi:nifi-processor-bundle-archetype project.

Then, from the list of versions, select the latest version, i.e. 1.7.1 for this tutorial.
Enter the groupId, artifactId, version, package, and artifactBaseName, etc.

A Maven project will then be created with two directories (the placeholder stands for your artifactBaseName):

nifi-<artifactBaseName>-processors
nifi-<artifactBaseName>-nar

Run the command below in the nifi-<artifactBaseName>-processors directory to add the project to Eclipse.

mvn install eclipse:eclipse

Open Eclipse and select Import from the File menu.

Then select "Existing Projects into Workspace" and add the project from the nifi-<artifactBaseName>-processors directory in Eclipse.

Add your code in the public void onTrigger(ProcessContext context, ProcessSession session) function, which runs whenever the processor is scheduled to run (see the sketch at the end of this section).

Then package the code into a NAR file by running the command mentioned below.

mvn clean install

A NAR file will be created in the nifi-<artifactBaseName>-nar/target directory.

Copy the NAR file to the lib folder of Apache NiFi and restart NiFi.

After a successful restart of NiFi, check the processor list for the new custom processor.

For any errors, check the ./logs/nifi.log file.
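As a reference for the onTrigger step above, here is a minimal processor sketch. Only the onTrigger signature comes from the tutorial; the class name, the success relationship, and the attribute written are illustrative assumptions:

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

public class MyProcessor extends AbstractProcessor {

   // Route every FlowFile this processor handles to a single "success" relationship
   public static final Relationship REL_SUCCESS = new Relationship.Builder()
         .name("success")
         .description("FlowFiles processed successfully")
         .build();

   @Override
   public Set<Relationship> getRelationships() {
      return Collections.singleton(REL_SUCCESS);
   }

   @Override
   public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
      // Pull the next FlowFile from an incoming queue, if any
      FlowFile flowFile = session.get();
      if (flowFile == null) {
         return;
      }
      // Example work: stamp an attribute, then pass the FlowFile downstream
      flowFile = session.putAttribute(flowFile, "processed.by", "MyProcessor");
      session.transfer(flowFile, REL_SUCCESS);
   }
}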
Apache NiFi also gives developers the option to add custom controller services. The steps and tools are almost the same as those used to create a custom processor.

Open a command prompt and execute the Maven archetype command.

> mvn archetype:generate

Search for the "nifi" type in the archetype projects.

Select the org.apache.nifi:nifi-service-bundle-archetype project.

Then, from the list of versions, select the latest version, i.e. 1.7.1 for this tutorial.

Enter the groupId, artifactId, version, package, and artifactBaseName, etc.

A Maven project will be created with the following directories (the placeholder stands for your artifactBaseName):

nifi-<artifactBaseName>
nifi-<artifactBaseName>-nar
nifi-<artifactBaseName>-api
nifi-<artifactBaseName>-api-nar

Run the command below in the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories to add these two projects to Eclipse.

mvn install eclipse:eclipse
Open Eclipse and select Import from the File menu.

Then select "Existing Projects into Workspace" and add the projects from the nifi-<artifactBaseName> and nifi-<artifactBaseName>-api directories in Eclipse.

Add your code in the source files.

Then package the code into NAR files by running the command mentioned below.

mvn clean install

Two NAR files will be created, one in each of the nifi-<artifactBaseName>/target and nifi-<artifactBaseName>-api/target directories.

Copy these NAR files to the lib folder of Apache NiFi and restart NiFi.

After a successful restart of NiFi, check the controller services list for the new custom controller service.

For any errors, check the ./logs/nifi.log file.

Apache NiFi uses the logback library to handle its logging. There is a file logback.xml in the conf directory of NiFi, which is used to configure the logging in NiFi. The logs are generated in the logs folder of NiFi, and the log files are as described below.

nifi-app.log: This is the main log file of NiFi, which logs all the activities of the Apache NiFi application, ranging from NAR file loading to the runtime errors or bulletins encountered by NiFi components.
Below is the default appender in the logback.xml file for the nifi-app.log file (the XML element names, lost in extraction, are restored here from logback's standard configuration schema around the surviving values):

<appender name="APP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-app_%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
        <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
            <maxFileSize>100MB</maxFileSize>
        </timeBasedFileNamingAndTriggeringPolicy>
        <maxHistory>30</maxHistory>
    </rollingPolicy>
    <immediateFlush>true</immediateFlush>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The appender name is APP_FILE, and the class is RollingFileAppender, which means the logger uses a rollover policy. By default, the maximum file size is 100 MB and can be changed to the required size. The maximum retention for APP_FILE is 30 log files, and this can be changed as per the user's requirement.

nifi-user.log: This log contains the user events, like web security, web API configuration, user authorization, etc. Below is the appender for nifi-user.log in the logback.xml file (reconstructed the same way):

<appender name="USER_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-user_%d.log</fileNamePattern>
        <maxHistory>30</maxHistory>
    </rollingPolicy>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The appender name is USER_FILE. It follows the rollover policy, and the maximum retention period for USER_FILE is 30 log files. logback.xml also defines the default loggers that route user events (such as the web security and authorization loggers) to the USER_FILE appender.

nifi-bootstrap.log: This log contains the bootstrap logs, Apache NiFi's standard output (everything written to System.out in the code, mainly for debugging), and standard error (everything written to System.err in the code). Below is the default appender for nifi-bootstrap.log in logback.xml (reconstructed the same way):

<appender name="BOOTSTRAP_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
        <fileNamePattern>${org.apache.nifi.bootstrap.config.log.dir}/nifi-bootstrap_%d.log</fileNamePattern>
        <maxHistory>5</maxHistory>
    </rollingPolicy>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>%date %level [%thread] %logger{40} %msg%n</pattern>
    </encoder>
</appender>

The nifi-bootstrap.log file's appender name is BOOTSTRAP_FILE, which also follows the rollover policy. The maximum retention for the BOOTSTRAP_FILE appender is 5 log files.
logback.xml likewise defines the default loggers that route bootstrap events to the BOOTSTRAP_FILE appender.

Explain abstract class in PHP.

PHP 5 comes with an object-oriented model. Some of the concepts of the object-oriented model are: classes, objects, encapsulation, polymorphism, abstract and final classes and methods, interfaces, inheritance, and so on. In this article, we will discuss the abstract class and its features related to the object-oriented techniques in PHP. We will also learn the implementation of abstract classes, along with a few examples.

But before diving too deep, let's learn how to define an abstract class.

We can declare a class as abstract by prefixing the name of the class with the abstract keyword. The definition is very clear: a class that contains abstract methods is known as an abstract class. Abstract methods defined in the abstract class just have a name and arguments, and no other code.

An object of an abstract class can't be made. Rather, we need to extend child classes that add definitions to the bodies of the abstract methods, and use these child classes to create objects.

Let's discuss some important facts about abstract classes in PHP.
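First, a basic working example. The original code block was lost in extraction, so the following is a hypothetical reconstruction, inferred from the surviving getdata() call and the output shown after it:

<?php
   // A class with at least one abstract method must be declared abstract
   abstract class AbstractClass {
      // Only the signature is declared; there is no body
      abstract protected function getdata();
   }
   class ConcreteClass extends AbstractClass {
      // The child class supplies the body
      public function getdata() {
         echo "Tutorials Point";
      }
   }
   $obj = new ConcreteClass();
   $obj->getdata();
?>

Running it prints:

Tutorials Point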
An object of an abstract class cannot be created directly; attempting to instantiate one is a fatal error.
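The code for this demonstration was also lost; below is a minimal sketch that reproduces the quoted error, with the adddata() name taken from the surviving fragment:

<?php
   abstract class AbstractClass {
      abstract public function adddata();
   }
   // Illegal: abstract classes cannot be instantiated directly
   $obj = new AbstractClass();
   $obj->adddata();
?>

Fatal error: Uncaught Error: Cannot instantiate abstract class AbstractClass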
All child classes must define all the methods marked as abstract in the parent class, and these methods need to be defined with the same signature or a less restricted one. Suppose that in the parent class we define an abstract method with protected visibility: in the child class implementation it should be defined as protected or public, but not private.
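A sketch of the lost example; the class and method names come from the quoted error message, while the method body is illustrative:

<?php
   abstract class AbstractBaseClass1 {
      // Declared public in the parent...
      abstract public function addValue();
   }
   class ConcreteClass extends AbstractBaseClass1 {
      // ...so a private implementation is illegal
      private function addValue() {
         echo "Value added";
      }
   }
   $obj = new ConcreteClass();
   $obj->addValue();
?>

Fatal error: Access level to ConcreteClass::addValue() must be public (as in class AbstractBaseClass1)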
Methods declared as abstract simply declare the method's signature; they cannot define a body inside them, although a body can be present inside a non-abstract method.
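A sketch of the lost example; the class and method names come from the quoted error message:

<?php
   abstract class ParentClass {
      // Illegal: an abstract method cannot define a body
      abstract public function printValue() {
         echo "Hello";
      }
   }
?>

PHP Fatal error: Abstract function ParentClass::printValue() cannot contain body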
An abstract class can have abstract and non-abstract methods, but it must contain at least one abstract method; and if it contains an abstract method, then it must be declared abstract. Forgetting to do so produces an error such as:

PHP Fatal error: Class AbstractClass contains 1 abstract method and must therefore be declared abstract or implement the remaining methods (AbstractClass::getValue)

An abstract class doesn't support multiple inheritance.
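A sketch of the lost example; the class and method names come from the explanation that follows, while the method bodies are illustrative:

<?php
   abstract class SuperClass {
      abstract public function test();
      abstract public function welcome();
   }
   class ClassA extends SuperClass {
      public function test() { echo "ClassA test"; }
      public function welcome() { echo "ClassA welcome"; }
   }
   class ClassB extends SuperClass {
      public function test() { echo "ClassB test"; }
      public function welcome() { echo "ClassB welcome"; }
   }
   // Illegal: a PHP class cannot extend two classes
   class ClassC extends ClassA, ClassB {
      public function call() {
         $this->welcome();
      }
   }
?>

Error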
Here we have declared SuperClass as an abstract class having the methods test() and welcome(), with ClassA and ClassB as concrete classes extending the abstract class; then we have tried to create ClassC extending both ClassA and ClassB. As is evident from the code, on calling the method welcome() using an object of ClassC, it would be impossible for the compiler to choose whether it has to call ClassA's welcome() or ClassB's welcome() method. So, to stay away from such complications, PHP does not support multiple inheritance.

An abstract class can extend another abstract class, and an abstract class can provide the implementation of an interface.

Advanced Excel Financial - PMT Function

The PMT function calculates the payment for a loan based on constant payments and a constant interest rate.

PMT (rate, nper, pv, [fv], [type])

pv: The present value, or the total amount that a series of future payments is worth now. Also known as the principal.

fv: The future value, or a cash balance you want to attain after the last payment is made. If fv is omitted, it is assumed to be 0 (zero); that is, the future value of a loan is 0.

type: The number 0 (zero) or 1, indicating when payments are due. With 0 or omitted, payments are due at the end of the period; with 1, payments are due at the beginning of the period.

The payment returned by PMT includes principal and interest, but no taxes, reserve payments, or fees sometimes associated with loans.

Make sure that you are consistent about the units you use for specifying rate and nper:

If you make monthly payments on a four-year loan at an annual interest rate of 12 percent, use 12%/12 for rate and 4*12 for nper.

If you make annual payments on the same loan, use 12 percent for rate and 4 for nper.

To find the total amount paid over the duration of the loan, multiply the returned PMT value by nper.

If the specified value of rate is less than or equal to -1, PMT returns the #NUM! error value.

If the specified value of nper is equal to 0, PMT returns the #NUM! error value.

If any of the specified arguments is non-numeric, PMT returns the #VALUE! error value.

Applies to: Excel 2007, Excel 2010, Excel 2013, Excel 2016.
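As a worked example (the figures are illustrative, not from the original article), consider borrowing $10,000 at 12 percent annual interest, repaid monthly over four years:

=PMT(12%/12, 4*12, 10000)

This returns approximately -263.34; the result is negative because Excel treats the payment as money going out. Multiplying by nper gives the total paid over the life of the loan: 4*12 * 263.34, which is about $12,640.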
How to explain your ML model with SHAP | by Yifei Huang | Towards Data Science

With the exception of simple linear models like linear regression, where you can easily look at the feature coefficients, machine learning models can often be a bit of a black box.
It can be very difficult to understand why the model predicts a particular output, or to verify that the output makes intuitive sense. Model explainability is the practice that attempts to address this by:

Disaggregating and quantifying the key drivers of the model output

Providing users tools to intuitively reason about why and how the model inputs lead to the output, both in the aggregate and in specific instances

Humans tend to distrust that which we cannot understand. The inability to understand the model often leads to a lack of trust and adoption, resulting in potentially useful models sitting on the sidelines. Even if the stakeholders and operators get over the initial hurdle of distrust, it is often not obvious how to operationalize the model output. Take a churn prediction model, for example: the model may be able to tell you that a particular customer is 90% likely to churn, but without a clear understanding of the drivers, it's not necessarily clear what can be done to prevent churn from happening.

Of course, the magnitude of the hurdles depends on the specific use case. For certain classes of models, like image recognition models (often deep learning based), it is very apparent if the output is right or wrong, and it is also fairly clear how to use the output. However, in many other use cases (like churn prediction, demand forecasting, and credit underwriting, just to name a few), the lack of explainability poses significant obstacles between models and tangible impact. The most accurate model in the world is worthless if it is not being used to drive decisions and actions. Therefore, it is crucial to make the model transparent and understandable to the stakeholders and operators, so that it can be leveraged and acted upon appropriately.

There are quite a few different approaches (some of which are model type specific) to help explain ML models. Of these, I like SHAP the most, for a few different reasons:

SHAP is consistent, meaning it provides an exact decomposition of the impact of each driver that can be summed to obtain the final prediction

SHAP unifies 6 different approaches (including LIME and DeepLIFT) [2] to provide a unified interface for explaining all kinds of different models. Specifically, it has TreeExplainer for tree based (including ensemble) models, DeepExplainer for deep learning models, GradientExplainer for internal layers of deep learning models, LinearExplainer for linear models, and a model agnostic KernelExplainer

SHAP provides helpful visualizations to aid in the understanding and explanation of models

I won't go into the details of how SHAP works underneath the hood, except to say that it leverages game theory concepts to optimally allocate the marginal contribution of each input feature. For more details, I encourage the readers to check out the related publications.
I won't go into the details of how SHAP works underneath the hood, except to say that it leverages game theory concepts to optimally allocate the marginal contribution of each input feature. For more details, I encourage readers to check out the related publications.

I will instead focus on a hands-on example of how to use SHAP to understand a churn prediction model. The dataset used in this example can be obtained from Kaggle.

Model setup

Since the model is not the main focus of this walkthrough, I won't delve too much into the details, except to provide some quick notes for the sake of clarity:

Each row in the dataset is a telco subscriber and contains metadata about the location, tenure, and usage metrics, as well as a label of whether the subscriber has churned

Before model training, the dataset is pre-processed to convert boolean features into 1 and 0, categorical features into one-hot encoded dummies, and numerical features into Z-scores using the sklearn StandardScaler (remove the mean and normalize by the standard deviation)

The minutes and charges features are found to be perfectly co-linear, so the minutes features are removed

The sklearn GradientBoostingClassifier is used to model the churn probability, and GridSearchCV is used to optimize the hyper-parameters

The resulting model has a 96% accuracy in cross-validation

The code that performs the above is as follows
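The original code embed did not survive here, so the snippet below is a minimal sketch reconstructed from the notes above. The file name telco_churn.csv and the exact column names are assumptions about the Kaggle dataset rather than the author's verbatim code.

import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler

df = pd.read_csv("telco_churn.csv")  # hypothetical file name

# Boolean features to 1/0
df["churn"] = (df["churn"] == "yes").astype(int)
for col in ["international_plan", "voice_mail_plan"]:
    df[col] = (df[col] == "yes").astype(int)

# Categorical features to one-hot encoded dummies
processed_df = pd.get_dummies(df, columns=["state", "area_code"])

# Minutes features are perfectly co-linear with the charge features, so drop them
processed_df = processed_df.drop(
    columns=[c for c in processed_df.columns if "minutes" in c])

# Numerical features to Z-scores
num_cols = ["account_length", "total_day_charge", "total_eve_charge",
            "total_night_charge", "total_intl_charge",
            "number_customer_service_calls"]
processed_df[num_cols] = StandardScaler().fit_transform(processed_df[num_cols])

features = [c for c in processed_df.columns if c != "churn"]

# Optimize the hyper-parameters with a small grid search
search = GridSearchCV(GradientBoostingClassifier(),
                      {"n_estimators": [100, 200], "max_depth": [2, 3, 4]},
                      cv=5)
search.fit(processed_df[features], processed_df["churn"])
gbt = search.best_estimator_  # the model explained with SHAP below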
Explaining aggregate feature impact with SHAP summary_plot

While SHAP can be used to explain any model, it offers an optimized method for tree ensemble models (which GradientBoostingClassifier is) in TreeExplainer. With a couple of lines of code, you can quickly visualize the aggregate feature impact on the model output as follows

explainer = shap.TreeExplainer(gbt)
shap_values = explainer.shap_values(processed_df[features])
shap.summary_plot(shap_values, processed_df[features])

This chart contains a ton of information about the model at the aggregate level, but it may be a bit overwhelming for the uninitiated, so let me walk through what we are looking at:

The individual dots represent specific training examples.

The y-axis shows the input features, ranked by magnitude of aggregate impact on the model output. The colors of the dots represent the value of the feature on the y-axis. Note that this does not mean the top feature is total_day_charge for every subscriber; we will get to explaining individual examples.

The x-axis shows the SHAP values, which, as the chart indicates, are the impacts on the model output. These are the values that you would sum to get the final model output for any specific example. In this particular case, since we are working with a classifier, they correspond to the log-odds ratio. A 0 means no marginal impact on the probability, a positive value means an increase in the churn probability, and a negative value means a decrease in the churn probability. The exact relationship between the log-odds ratio and probability is log(p/(1-p)), where p is the probability (a worked conversion follows this list).

SHAP adds a bit of perturbation to the vertical positions of points when there is a large number of points occupying the same space, to help convey the high density. See the large blob of points for total_day_charge.
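Since every SHAP value in this plot lives in log-odds space, it helps to work the conversion back to probability through once. This is plain logistic algebra rather than anything SHAP-specific; the two inputs below are the baseline and a predicted log-odds ratio that appear in the waterfall example later in this article.

import numpy as np

def logodds_to_prob(z):
    # inverse of log(p / (1 - p))
    return 1.0 / (1.0 + np.exp(-z))

print(logodds_to_prob(-2.84))   # ~0.055, the 5.5% baseline churn probability
print(logodds_to_prob(1.667))   # ~0.84, i.e. an 84% churn probability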
What can we learn from this plot?

Similar to what you can get from traditional feature importance plots for classifiers, we can see that the top 5 drivers of churn are total_day_charge, number_customer_service_calls, international_plan, total_eve_charge, and voice_mail_plan

We can see how each feature impacts churn probability: the total_day_charge impact is asymmetrical, and primarily drives up churn probability when its value is high, but does not drive down churn probability to the same extent when its value is low. Contrast this with total_eve_charge, which has a much more symmetrical impact.

We can also see that subscribers who have international_plan are much more likely to churn than those who do not (red dots are far out on the right and blue dots are close to 0). Conversely, those who have voice_mail_plan are much less likely to churn than those who do not.

Explaining specific feature impact with SHAP dependence_plot

The impact of international_plan is very curious: why would subscribers who have it be more likely to churn than those who do not? SHAP has a nice method called dependence_plot to help users unpack this.

shap.dependence_plot("international_plan", shap_values, processed_df[features], interaction_index="total_intl_charge")

The dependence plot is a deep dive into a specific feature, to understand how the model output is impacted by different values of that feature, and how this is affected by interaction with another feature. Again, it can be a bit overwhelming for the uninitiated, so let me walk through it:

Dots represent individual training examples

Colors represent the value of the interaction feature (total_intl_charge)

The y-axis is the SHAP value for the main feature being examined (international_plan)

The x-axis is the value of the main feature being examined (international_plan: 0 for does not have the plan, 1 for has the plan)

We can see, as before, that those with an international plan seem to have a higher churn probability. Additionally, we can also see from the interaction feature of total international charge that the red dots (higher total international charge) tend to have a higher churn probability. Because of the bunching of points, it is difficult to make out what is happening, so let's change the order of the two features to get a better look.

shap.dependence_plot("total_intl_charge", shap_values, processed_df[features], interaction_index="international_plan")

Now this plot tells a very interesting story.

As a reminder, the x-axis here is total international charge transformed to the z-score: 0 = the average of all subscribers in the data, and non-zero values = standard deviations away from the average value. We can see that for those whose international charge is less than 1 standard deviation above the average, having an international plan actually lowers the churn impact of international charge (red dots to the left of 1 have lower SHAP values than blue dots). However, as soon as you cross to the right of 1 standard deviation of international charge, having an international plan significantly increases the churn impact (red dots to the right of 1 have much higher SHAP values than blue dots).

It is not that people who have an international plan are more likely to churn; rather, it is that people who have an international plan and also a high total international charge are a LOT more likely to churn.

A plausible way to interpret this is that subscribers who have international plans expect to be protected from high international charges, and when they are not, they are much more likely to cancel their subscription and go with a different provider who can offer better rates. This obviously requires additional investigation, and perhaps also data collection, to validate, but it is already a very interesting and actionable lead that can be pursued.
Explaining individual examples with SHAP waterfall_plot

In addition to understanding drivers at an aggregate level, SHAP also enables you to examine individual examples and understand the drivers of the final prediction.

# visualize a single prediction's explanation using waterfall
# 2020-12-28: there is a bug in the current implementation of waterfall_plot,
# where the data structure expected does not match the API output,
# hence the need for a custom class
i = 1001

class ShapObject:
    def __init__(self, base_values, data, values, feature_names):
        self.base_values = base_values      # Single value
        self.data = data                    # Raw feature values for 1 row of data
        self.values = values                # SHAP values for the same row of data
        self.feature_names = feature_names  # Column names

shap_object = ShapObject(base_values=explainer.expected_value[0],
                         values=shap_values[i, :],
                         feature_names=features,
                         data=processed_df[features].iloc[i, :])
shap.waterfall_plot(shap_object)

This plot decomposes the drivers of a specific prediction.

The x-axis is the SHAP value (or log-odds ratio). At the very bottom, E[f(x)] = -2.84 indicates the baseline log-odds ratio of churn for the population, which translates to a 5.5% churn probability using the formula provided above.

The y-axis shows the names of the features represented by the arrows, along with their respective values.

The impact (SHAP value) of each individual feature (less significant features are lumped together) is represented by the arrows that move the log-odds ratio to the left and right, starting from the baseline value. Red arrows increase the log-odds ratio, and blue arrows reduce the log-odds ratio.

This particular example has a final predicted log-odds ratio of -3.967 (or a 1.8% churn probability), largely driven by a relatively average total day charge and the low number of customer service calls. Contrast this with the example below, where the final predicted log-odds ratio is 1.667 (or an 84% churn probability), driven primarily by the very high number of customer service calls.
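As an aside, on more recent releases of the shap library (an assumption about your installed version), the same chart can be produced without the custom-class workaround by constructing a shap.Explanation object directly:

# Assumes a shap version that exposes shap.Explanation
exp = shap.Explanation(values=shap_values[i, :],
                       base_values=explainer.expected_value[0],
                       data=processed_df[features].iloc[i, :].values,
                       feature_names=features)
shap.waterfall_plot(exp)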
ML model explainability creates the ability for users to understand and quantify the drivers of the model predictions, both in the aggregate and for specific examples

Explainability is a key component to getting models adopted and operationalized in an actionable way

SHAP is a useful tool for quickly enabling model explainability

Hope this was a useful walkthrough. Feel free to reach out if you have comments or questions. Twitter | Linkedin | Medium
CouchDB - Quick Guide

A database management system provides mechanisms for the storage and retrieval of data. There are three main types of database management systems, namely RDBMS (Relational Database Management Systems), OLAP (Online Analytical Processing Systems), and NoSQL.

RDBMS stands for Relational Database Management System. RDBMS is the basis for SQL, and for all modern database systems like MS SQL Server, IBM DB2, Oracle, MySQL, and Microsoft Access.

A Relational Database Management System (RDBMS) is a database management system (DBMS) that is based on the relational model as introduced by E. F. Codd.

The data in an RDBMS is stored in database objects called tables. A table is a collection of related data entries and it consists of columns and rows. It stores only structured data.

An Online Analytical Processing (OLAP) server is based on the multidimensional data model. It allows managers and analysts to get an insight into the information through fast, consistent, and interactive access to it.

A NoSQL database (sometimes called Not Only SQL) is a database that provides a mechanism to store and retrieve data other than the tabular relations used in relational databases. These databases are schema-free, support easy replication, have simple APIs, are eventually consistent, and can handle huge amounts of data (big data).

The primary objective of a NoSQL database is to have the following −

Simplicity of design,

Horizontal scaling, and

Finer control over availability.

NoSQL databases use different data structures compared to relational databases, which makes some operations faster in NoSQL. The suitability of a given NoSQL database depends on the problem it must solve. These databases store both structured data and unstructured data like audio files, video files, documents, etc. NoSQL databases are classified into three types, explained below.

Key-value Store − These databases are designed for storing data in key-value pairs, and they do not have any schema. In these databases, each data value consists of an indexed key and a value for that key.

Examples − BerkeleyDB, Cassandra, DynamoDB, Riak.

Column Store − In these databases, data is stored in cells grouped in columns of data, and these columns are further grouped into column families. These column families can contain any number of columns.

Examples − BigTable, HBase, and HyperTable.

Document Store − These are databases developed on the basic idea of key-value stores, where "documents" contain more complex data. Here, each document is assigned a unique key, which is used to retrieve the document. These are designed for storing, retrieving, and managing document-oriented information, also known as semi-structured data.

Examples − CouchDB and MongoDB.
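To make the distinction concrete, here is a small illustrative sketch; the record itself is invented for this example. A key-value store sees only an opaque value for each key, while a document store understands the structure inside the value and can index and query the individual fields.

Key-value store entry:
"user:1001" -> "Raju, Hyderabad, 23"

Document store entry:
{
   "name" : "Raju",
   "city" : "Hyderabad",
   "age" : 23
}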
CouchDB is an open source database developed by the Apache Software Foundation. The focus is on ease of use, embracing the web. It is a NoSQL document store database.

It uses JSON to store data (documents), JavaScript as its query language to transform the documents, and HTTP as the protocol for its API, through which you can access the documents and query the indices from a web browser. It is a multi-master application released in 2005, and it became an Apache project in 2008.

CouchDB has an HTTP-based REST API, which helps to communicate with the database easily. The simple structure of HTTP resources and methods (GET, PUT, DELETE) is easy to understand and use.

As we store data in a flexible document-based structure, there is no need to worry about the structure of the data.

Users are provided with powerful data mapping, which allows querying, combining, and filtering the information.

CouchDB provides easy-to-use replication, using which you can copy, share, and synchronize the data between databases and machines.

The database is the outermost data structure/container in CouchDB.

Each database is a collection of independent documents.

Each document maintains its own data and self-contained schema.

Document metadata contains revision information, which makes it possible to merge any differences that occurred while the databases were disconnected.

CouchDB implements multi-version concurrency control, to avoid the need to lock the database fields during writes.

CouchDB is a document storage NoSQL database. It provides the facility of storing documents with unique names, and it also provides an API called the RESTful HTTP API for reading and updating (add, edit, delete) database documents.

In CouchDB, documents are the primary unit of data and they also include metadata. Document fields are uniquely named and contain values of varying types (text, number, Boolean, lists, etc.), and there is no set limit on text size or element count.

Document updates (add, edit, delete) follow atomicity, i.e., they will be saved completely or not saved at all. The database will not have any partially saved or edited documents.

{
   "field" : "value",
   "field" : "value",
   "field" : "value"
}
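For illustration, here is what such a document might look like once stored, with the metadata CouchDB maintains alongside the user-defined fields. The field values below are invented; _id is the document's unique identifier and _rev is the revision token used to track updates.

{
   "_id" : "582e24fed1f2c437f2356ba4ef003255",
   "_rev" : "1-9c65296036141e575d32ba9c034dd3ee",
   "name" : "Raju",
   "age" : 23,
   "designation" : "Designer"
}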
CouchDB contains ACID properties as one of its features.

Consistency − Once data has been committed in CouchDB, it will not be modified or overwritten. Thus, CouchDB ensures that the database file will always be in a consistent state.

A Multi-Version Concurrency Control (MVCC) model is used by CouchDB reads, because of which the client will see a consistent snapshot of the database from the beginning to the end of the read operation.

Whenever a document is updated, CouchDB flushes the data to disk, and the updated database header is written in two consecutive and identical chunks that make up the first 4k of the file, and then synchronously flushed to disk. Partial updates during the flush are discarded.

If a failure occurs while committing the header, a surviving copy of the previous identical headers will remain, ensuring coherency of all previously committed data. Except for the header area, consistency checks or fix-ups after a crash or a power failure are never necessary.

Whenever the wasted space in the database file grows beyond a certain extent, all the active data is copied (cloned) to a new file. When the copying process is entirely done, the old file is discarded. All this is done by the compaction process. The database remains online during compaction, and all updates and reads are allowed to complete successfully.

Data in CouchDB is stored in semi-structured documents that are flexible, with individual implicit structures; this is a simple document model for data storage and sharing. If we want to see our data in many different ways, we need a way to filter, organize, and report on data that has not been decomposed into tables.

To solve this problem, CouchDB provides a view model. Views are the method of aggregating and reporting on the documents in a database, and they are built on demand to aggregate, join, and report on database documents. Because views are built dynamically and do not affect the underlying documents, you can have as many different view representations of the same data as you like.
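To give a feel for what a view looks like, here is a minimal sketch of a map function. CouchDB map functions are written in JavaScript and call the built-in emit(key, value) function to add rows to the view; the document fields used here (type, name, age) are invented for illustration.

function (doc) {
   // emit one row per user document, keyed by name
   if (doc.type === 'user') {
      emit(doc.name, doc.age);
   }
}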
CouchDB was written in the Erlang programming language.
It was started by Damien Katz in 2005.
CouchDB became an Apache project in 2008.
The current version of CouchDB is 1.6.1.
This chapter teaches you how to install CouchDB on Windows as well as Linux systems.
The official website for CouchDB is https://couchdb.apache.org. If you click the given link, you can get the home page of the CouchDB official website as shown below.
Clicking on the download button leads to a page where download links of CouchDB in various formats are provided. The following snapshot illustrates the same.
Choose the download link for Windows systems and select one of the provided mirrors to start your download.
CouchDB will be downloaded to your system in the form of a setup file named setup-couchdb-1.6.1_R16B02.exe. Run the setup file and proceed with the installation.
After installation, open the built-in web interface of CouchDB by visiting the following link: http://127.0.0.1:5984/. If everything goes fine, this will give you a web page with the following output.
{
   "couchdb":"Welcome","uuid":"c8d48ac61bb497f4692b346e0f400d60",
   "version":"1.6.1",
   "vendor":{
      "version":"1.6.1","name":"The Apache Software Foundation"
   }
}
You can interact with the CouchDB web interface by using the following url −
http://127.0.0.1:5984/_utils/

This shows you the index page of Futon, which is the web interface of CouchDB.
Many Linux distributions provide CouchDB in their package repositories. To install CouchDB this way, follow the instructions below.
On Ubuntu and Debian you can use −
sudo aptitude install couchdb
On Gentoo Linux there is a CouchDB ebuild available −
sudo emerge couchdb
If your Linux system does not have CouchDB, follow the next section to install CouchDB and its dependencies.
Following is the list of dependencies that are to be installed to get CouchDB in your system −
Erlang OTP
ICU
OpenSSL
Mozilla SpiderMonkey
GNU Make
GNU Compiler Collection
libcurl
help2man
Python for docs
Python Sphinx
To install these dependencies, type the following commands in the terminal. Here we are using CentOS 6.5, and the following commands will install the required software packages compatible with CentOS 6.5.
$ sudo yum install autoconf
$ sudo yum install autoconf-archive
$ sudo yum install automake
$ sudo yum install curl-devel
$ sudo yum install erlang-asn1
$ sudo yum install erlang-erts
$ sudo yum install erlang-eunit
$ sudo yum install erlang-os_mon
$ sudo yum install erlang-xmerl
$ sudo yum install help2man
$ sudo yum install js-devel
$ sudo yum install libicu-devel
$ sudo yum install libtool
$ sudo yum install perl-Test-Harness
Note − For all these commands you need to use sudo. The following procedure converts a normal user to a sudoer.
Log in as root (or as an admin user).
Open the sudoers file using the following command −
visudo
Then edit it as shown below to give your existing user sudoer privileges −
Hadoop All=(All) All
Press esc : x to write the changes to the file.
After installing all the dependencies in your system, download CouchDB following the given instructions.
The Apache Software Foundation does not provide ready-made binary packages of CouchDB for Linux, so you have to install it from source.
Create a new directory to install CouchDB in, browse to the created directory, and download the CouchDB source by executing the following commands −
$ cd
$ mkdir CouchDB
$ cd CouchDB/
$ wget http://www.apache.org/dist/couchdb/source/1.6.1/apache-couchdb-1.6.1.tar.gz
This will download the CouchDB source archive into your system. Now extract apache-couchdb-1.6.1.tar.gz as shown below.
$ tar zxvf apache-couchdb-1.6.1.tar.gz
To configure CouchDB, do the following −
Browse to the home folder of CouchDB.
Login as superuser.
Configure using the ./configure script as shown below −
$ cd apache-couchdb-1.6.1
$ su
Password:
# ./configure --with-erlang=/usr/lib64/erlang/usr/include/
It gives you output similar to the following, with a concluding line saying − You have configured Apache CouchDB, time to relax.
# ./configure --with-erlang=/usr/lib64/erlang/usr/include/

checking for a BSD-compatible install... /usr/bin/install -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... /bin/mkdir -p
checking for gawk... gawk
checking whether make sets $(MAKE)... yes
checking how to create a ustar tar archive... gnutar
.................................................................
............................
config.status: creating var/Makefile
config.status: creating config.h
config.status: config.h is unchanged
config.status: creating src/snappy/google-snappy/config.h
config.status: src/snappy/google-snappy/config.h is unchanged
config.status: executing depfiles commands
config.status: executing libtool commands

You have configured Apache CouchDB, time to relax.

Run `make && sudo make install' to install.
Now type the following command to install CouchDB in your system.
# make && sudo make install
It installs CouchDB in your system, with a concluding line saying − You have installed Apache CouchDB, time to relax.
To start CouchDB, browse to the CouchDB home folder and use the following command −
$ cd apache-couchdb-1.6.1
$ cd etc
$ couchdb start
It starts CouchDB, giving the following output −
Apache CouchDB 1.6.1 (LogLevel=info) is starting.
Apache CouchDB has started. Time to relax.
[info] [<0.31.0>] Apache CouchDB has started on http://127.0.0.1:5984/
[info] [<0.112.0>] 127.0.0.1 - - GET / 200
[info] [<0.112.0>] 127.0.0.1 - - GET /favicon.ico 200

Since CouchDB serves a web interface, try typing the following homepage url in the browser.
http://127.0.0.1:5984/
It produces the following output −
{
   "couchdb":"Welcome",
   "uuid":"8f0d59acd0e179f5e9f0075fa1f5e804",
   "version":"1.6.1",
   "vendor":{
      "name":"The Apache Software Foundation",
      "version":"1.6.1"
   }
}

The cURL utility is one way to communicate with CouchDB.
It is a tool to transfer data from or to a server, using one of the supported protocols (HTTP, HTTPS, FTP, FTPS, TFTP, DICT, TELNET, LDAP or FILE). The command is designed to work without user interaction. cURL offers a busload of useful tricks like proxy support, user authentication, FTP upload, HTTP POST, SSL (https:) connections, cookies, file transfer resume and more.
The cURL utility is available on operating systems such as UNIX, Linux, Mac OS X and Windows. It is a command-line utility with which a user can access the HTTP protocol straight from the command line. This chapter teaches you how to use the cURL utility.
You can access any website using the cURL utility by simply typing curl followed by the website address as shown below −
curl www.tutorialspoint.com/
By default, the cURL utility returns the source code of the requested page. It displays this code on the terminal window.
The cURL utility provides various options to work with, and you can see them in the cURL utility help.
The following code shows some portion of cURL help.
$ curl --help
Usage: curl [options...] <url>
Options: (H) means HTTP/HTTPS only, (F) means FTP only
    --anyauth         Pick "any" authentication method (H)
 -a/--append          Append to target file when uploading (F/SFTP)
    --basic           Use HTTP Basic Authentication (H)
    --cacert <file>   CA certificate to verify peer against (SSL)
 -d/--data <data>     HTTP POST data (H)
    --data-ascii <data>      HTTP POST ASCII data (H)
    --data-binary <data>     HTTP POST binary data (H)
    --data-urlencode <data>  HTTP POST data urlencoded (H)
    --delegation STRING      GSS-API delegation permission
    --digest          Use HTTP Digest Authentication (H)
    --disable-eprt    Inhibit using EPRT or LPRT (F)
    --disable-epsv    Inhibit using EPSV (F)

 -F/--form <name=content>    Specify HTTP multipart POST data (H)
    --form-string <name=string>  Specify HTTP multipart POST data (H)
    --ftp-account <data>     Account data to send when requested by server (F)
    --ftp-alternative-to-user  String to replace "USER [name]" (F)
    --ftp-create-dirs Create the remote dirs if not present (F)
    --ftp-method [multicwd/nocwd/singlecwd]  Control CWD usage (F)
    --ftp-pasv        Use PASV/EPSV instead of PORT (F)

 -G/--get             Send the -d data with a HTTP GET (H)

 -H/--header <line>   Custom header to pass to server (H)
 -I/--head            Show document info only
 -h/--help            This help text
    --hostpubmd5 <md5>  Hex encoded MD5 string of the host public key (SSH)
 -0/--http1.0         Use HTTP 1.0 (H)
    --ignore-content-length  Ignore the HTTP Content-Length header
 -i/--include         Include protocol headers in the output (H/F)

 -M/--manual          Display the full manual

 -o/--output <file>   Write output to <file> instead of stdout
    --pass <pass>     Pass phrase for the private key (SSL/SSH)
    --post301         Do not switch to GET after following a 301 redirect (H)
    --post302         Do not switch to GET after following a 302 redirect (H)
 -O/--remote-name     Write output to a file named as the remote file
    --remote-name-all Use the remote file name for all URLs
 -R/--remote-time     Set the remote file's time on the local output
 -X/--request <command>   Specify request command to use
    --retry <num>     Retry request <num> times if transient problems occur
    --retry-delay <seconds>  Wait this many seconds between each retry
    --retry-max-time <seconds>  Retry only within this period
 -T/--upload-file <file>  Transfer <file> to remote site
    --url <URL>       Set URL to work with
 -B/--use-ascii       Use ASCII/text transfer
While communicating with CouchDB, certain options of the cURL utility are used extensively. Following are brief descriptions of some important options of the cURL utility, including those used by CouchDB.
-X/--request <command>
(HTTP) Specifies a custom request method to use when communicating with the HTTP server. The specified request is used instead of the method otherwise used (which defaults to GET). Read the HTTP 1.1 specification for details and explanations.
(FTP) Specifies a custom FTP command to use instead of LIST when doing file lists with FTP.
-H/--header <line>
(HTTP) Extra header to use when getting a web page. Note that if you add a custom header that has the same name as one of the internal ones cURL would use, your externally set header will be used instead of the internal one. This allows you to make even trickier work than cURL would normally do. You should not replace internally set headers without knowing perfectly well what you're doing. Replacing an internal header with one that has no content on the right side of the colon will prevent that header from appearing.
cURL assures that each header you add/replace is sent with the proper end-of-line marker. You should neither add that marker as part of the header content nor add newlines or carriage returns to disorder things.
See also the -A/--user-agent and -e/--referer options.
This option can be used multiple times to add/replace/remove multiple headers.
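For instance, a request to CouchDB might set the Accept header explicitly. This is a minimal sketch, assuming the database my_database already exists −
$ curl -X GET http://127.0.0.1:5984/my_database -H "Accept: application/json"
With this header, CouchDB responds with Content-Type: application/json instead of the default text/plain.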
-d/--data <data>
Using this flag of cURL, you can send data along with the HTTP POST request to the server, as if it had been filled in by the user in a form and submitted.
Example
Suppose there is a website and you want to log into it or send some data to it using the -d flag of the cURL utility, as shown below.
curl -X PUT http://mywebsite.com/login.html -d userid=001 -d password=tutorialspoint
It sends a data chunk that looks like "userid=001&password=tutorialspoint". Likewise, you can also send documents (JSON) using the -d flag.
-o/--output <file>
Using this flag, cURL writes the output of the request to a file.
Example
The following example shows the use of the -o flag of the cURL utility.
$ curl -o example.html www.tutorialspoint.com/index.htm
% Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                               Dload  Upload   Total   Spent    Left  Speed
100 81193    0 81193    0     0  48168      0 --:--:--  0:00:01 --:--:-- 58077
This gets the source code of the homepage of tutorialspoint.com, creates a file named example.html, and saves the output in it.
Following is the snapshot of the example.html.
-O/--remote-name
This flag is similar to -o; the only difference is that with this flag, a new file with the same name as the requested url is created, and the source code of the requested url is copied to it.
Example
The following example shows the use of the -O flag of the cURL utility.
$ curl -O www.tutorialspoint.com/index.htm
% Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                               Dload  Upload   Total   Spent    Left  Speed
100 81285    0 81285    0     0  49794      0 --:--:--  0:00:01 --:--:-- 60077
It creates a new file with the name index.htm and saves the source code of the index page of tutorialspoint.com in it.
You can access the homepage of CouchDB by sending a GET request to the installed CouchDB instance. First of all, make sure you have installed CouchDB in your Linux environment and that it is running successfully, and then use the following syntax to send a GET request to the CouchDB instance.
curl http://127.0.0.1:5984/
This gives you a JSON document as shown below, where CouchDB specifies details such as the version number, the name of the vendor, and the version of the software.
$ curl http://127.0.0.1:5984/
{
   "couchdb" : "Welcome",
   "uuid" : "8f0d59acd0e179f5e9f0075fa1f5e804",
   "version" : "1.6.1",
   "vendor" : {
      "name":"The Apache Software Foundation",
      "version":"1.6.1"
   }
}

You can get the list of all the databases created by sending a GET request for "_all_dbs". Following is the syntax to get the list of all databases in CouchDB.
curl -X GET http://127.0.0.1:5984/_all_dbs
It gives you the list of all databases in CouchDB as shown below.
$ curl -X GET http://127.0.0.1:5984/_all_dbs
[ "_replicator", "_users" ]
You can create a database in CouchDB using cURL with a PUT request using the following syntax −
$ curl -X PUT http://127.0.0.1:5984/database_name

As an example, using the above given syntax, create a database with the name my_database as shown below.
$ curl -X PUT http://127.0.0.1:5984/my_database
{"ok":true}
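Note that database names are unique. If you repeat the same PUT request for a database that already exists, CouchDB refuses to create it again; the response can be expected to look along these lines (a sketch of the behavior) −
$ curl -X PUT http://127.0.0.1:5984/my_database
{"error":"file_exists","reason":"The database could not be created, the file already exists."}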
Verify whether the database is created by listing out all the databases as shown below. Here you can observe the name of the newly created database, "my_database", in the list.
$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users", "my_database" ]
You can get information about a database using a GET request along with the database name. Following is the syntax to get the database information.
curl -X GET http://127.0.0.1:5984/database_name
As an example, let us get the information of the database named my_database as shown below. Here you get the information about your database as a response.
$ curl -X GET http://127.0.0.1:5984/my_database

{
   "db_name" : "my_database",
   "doc_count" : 0,
   "doc_del_count" : 0,
   "update_seq" : 0,
   "purge_seq" : 0,
   "compact_running" : false,
   "disk_size" : 79,
   "data_size" : 0,
   "instance_start_time" : "1423628520835029",
   "disk_format_version" : 6,
   "committed_update_seq" : 0
}
Futon is the built-in, web-based administration interface of CouchDB. It provides a simple graphical interface with which you can interact with CouchDB. It is a native interface and it provides full access to all CouchDB features. Following is the list of those features −
Creates databases.
Destroys databases.
Creates documents.
Updates documents.
Edits documents.
Deletes documents.
Make sure CouchDB is running and then open the following url in a browser −
http://127.0.0.1:5984/_utils/
If you open this url, it displays the Futon home page as shown below −
On the left-hand side of this page you can observe the list of all the current databases of CouchDB. In this illustration, we have a database named my_database, along with the system-defined databases _replicator and _users.
On the right-hand side you can see the following −
Tools − In this section you can find Configuration to configure CouchDB, Replicator to perform replications, and Status to verify the status of CouchDB and recent modifications done on CouchDB.
Documentation − This section contains the complete documentation for the recent version of CouchDB.
Diagnostics − Under this you can verify the installation of CouchDB.
Recent Databases − Under this you can find the names of recently added databases.
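The replication that the Replicator tool performs can also be triggered over HTTP. The following is a minimal sketch, assuming the source database my_database exists; the target name my_database_copy is made up −
$ curl -X POST http://127.0.0.1:5984/_replicate -H "Content-Type: application/json" -d '{ "source" : "my_database", "target" : "my_database_copy", "create_target" : true }'
Here create_target asks CouchDB to create the target database if it does not already exist.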
Using HTTP requests, you can communicate with CouchDB. Through these requests we can retrieve data from the database, store data into the database in the form of documents, and view as well as format the documents stored in a database.
While communicating with the database we will use different request methods like GET, HEAD, POST, PUT, DELETE, and COPY. For all operations in CouchDB, the input data and the output data structures will be in the form of JavaScript Object Notation (JSON) objects.
Following are the different request methods of the HTTP protocol used to communicate with CouchDB.
GET − This method is used to get a specific item. To get different items, you have to send specific url patterns. In CouchDB, using a GET request we can get static items, database documents, configuration, and statistical information, in the form of JSON documents (in most cases).
HEAD − The HEAD method is used to get the HTTP header of a GET request without the body of the response.
POST − A POST request is used to upload data. In CouchDB, using a POST request you can set values, upload documents, set document values, and also start certain administration commands.
PUT − Using a PUT request, you can create new objects, databases, documents, views and design documents.
DELETE − Using a DELETE request, you can delete documents, views, and design documents.
COPY − Using the COPY method, you can copy documents and objects (a short sketch of HEAD and COPY follows this list).
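Here is a brief sketch of HEAD and COPY in action, assuming a database my_database containing a document with id 001 −
$ curl -I http://127.0.0.1:5984/my_database/001
$ curl -X COPY http://127.0.0.1:5984/my_database/001 -H "Destination: 001_copy"
The first command (-I is cURL's --head option) returns only the response headers of a GET. The second copies document 001 to a new document whose id is given in the Destination header.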
HTTP headers should be supplied to get the right format and encoding. While sending a request to the CouchDB server, you can send HTTP request headers along with the request. Following are the different HTTP request headers.
Content-Type − This header is used to specify the content type of the data that we supply to the server along with the request. Mostly the type of content we send along with the request will be a MIME type or JSON (application/json). Using Content-Type on a request is highly recommended.
Accept − This header is used to specify to the server the list of data types that the client can understand, so that the server will send its response using those data types. Generally, here you can send the list of MIME data types the client accepts, separated by commas.
Though using Accept in queries of CouchDB is not required, it is highly recommended to ensure that the data returned can be processed by the client.
These are the headers of the response sent by the server. These headers give information about the content sent by the server as a response.
Content-Type − This header specifies the MIME type of the data returned by the server. For most requests, the returned MIME type is text/plain.
Cache-Control − This header suggests to the client how to treat the information sent by the server. CouchDB mostly returns must-revalidate, which indicates that the information should be revalidated if possible.
Content-Length − This header returns the length of the content sent by the server, in bytes.
ETag − This header is used to show the revision of a document, or a view.
Following are the status codes sent in the HTTP response header, with their descriptions.
200 − OK − This status is issued when a request is completed successfully.
201 − Created − This status is issued when a document is created.
202 − Accepted − This status is issued when a request is accepted.
404 − Not Found − This status is issued when the server is unable to find the requested content.
405 − Resource Not Allowed − This status is issued when the HTTP request type used is invalid.
409 − Conflict − This status is issued whenever there is an update conflict.
415 − Bad Content Type − This status indicates that the requested content type is not supported by the server.
500 − Internal Server Error − This status is issued whenever the data sent in the request is invalid.
There are certain url paths with which you can interact with the database directly. Following are such url paths.
PUT /db − This url is used to create a new database.
GET /db − This url is used to get information about the existing database.
PUT /db/document − This url is used to create a document or update an existing document.
GET /db/document − This url is used to get a document.
DELETE /db/document − This url is used to delete the specified document from the specified database.
GET /db/_design/design-doc − This url is used to get the definition of a design document.
GET /db/_design/design-doc/_view/view-name − This url is used to access the view named view-name from the design document design-doc in the specified database.
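For example, reusing the hypothetical design document _design/example and view by_age sketched earlier, the last two paths would be exercised as follows −
$ curl -X GET http://127.0.0.1:5984/my_database/_design/example
$ curl -X GET http://127.0.0.1:5984/my_database/_design/example/_view/by_age
The first returns the design document itself; the second runs the view and returns its rows.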
Database is the outermost data structure in CouchDB, where your documents are stored. You can create these databases using the cURL utility provided by CouchDB, as well as Futon, the web interface of CouchDB.
You can create a database in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to create a database −
$ curl -X PUT http://127.0.0.1:5984/database_name

Using −X we can specify the custom HTTP request method to be used. In this case, we are using the PUT method. When we use the PUT method, the content of the url specifies the name of the object we are creating using the HTTP request. Here we have to send the name of the database in the url of the PUT request to create a database.
Using the above given syntax, if you want to create a database with the name my_database, you can create it as follows −
curl -X PUT http://127.0.0.1:5984/my_database
{
   "ok":true
}

As a response, the server will return a JSON document with the content "ok" − true, indicating the operation was successful.
Verify whether the database is created by listing out all the databases as shown below. Here you can observe the name of the newly created database, "my_database", in the list.
$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users", "my_database" ]
To create a database, open http://127.0.0.1:5984/_utils/. You will get an Overview/index page of CouchDB as shown below.
In this page, you can see the list of databases in CouchDB, and an option button Create Database on the left-hand side.
Now click on the Create Database link. You can see a popup window Create New Database asking for the database name for the new database. Choose any name following the mentioned criteria. Here we are creating another database with the name tutorials_point. Click on the Create button as shown in the following screenshot.
You can delete a database in CouchDB by sending a request to the server using the DELETE method through the cURL utility. Following is the syntax to delete a database −
$ curl -X DELETE http://127.0.0.1:5984/database_name

Using −X we can specify the custom HTTP request method we are using while communicating with the HTTP server. In this case, we are using the DELETE method. Send the url to the server, specifying the database to be deleted in it.
Assume there is a database named my_database2 in CouchDB. Using the above given syntax, if you want to delete it, you can do so as follows −
$ curl -X DELETE http://127.0.0.1:5984/my_database2
{
   "ok" : true
}
As a response, the server will return a JSON document with the content "ok" − true, indicating the operation was successful.
Verify whether the database is deleted by listing out all the databases as shown below. Here you can observe that the name of the deleted database, "my_database2", is no longer in the list.
$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users" ]
To delete a database, open the http://127.0.0.1:5984/_utils/ url, where you will get an Overview/index page of CouchDB as shown below.
Here you can see three user-created databases. Let us delete the database named tutorials_point2. To delete a database, select one from the list of databases and click on it, which will lead to the overview page of the selected database, where you can see the various operations on databases. The following screenshot shows the same −
Among them you can find the Delete Database option. By clicking on it you will get a popup window asking whether you are sure. Click on Delete to delete the selected database.
Documents are CouchDB's central data structure. The contents of the database are stored in the form of documents instead of tables. You can create these documents using the cURL utility provided by CouchDB, as well as Futon. This chapter covers the ways to create a document in a database.
Each document in CouchDB has a unique ID. You can choose your own ID, which should be in the form of a string. Generally, a UUID (Universally Unique IDentifier) is used; these are random numbers that have the least chance of creating a duplicate. They are preferred in order to avoid collisions.
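If you would rather let the server generate an id, CouchDB exposes a _uuids endpoint that returns fresh UUIDs on request. A quick sketch (the value shown is illustrative) −
$ curl -X GET http://127.0.0.1:5984/_uuids
{"uuids":["6e1295ed6c29495e54cc05947f18c8af"]}
The returned string can then be used as the document id in the PUT request described next.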
You can create a document in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to create a document.
$ curl -X PUT http://127.0.0.1:5984/database_name/"id" -d '{ document }'
Using −X, we can specify the custom HTTP request method we are using while communicating with the HTTP server. In this case, we are using the PUT method. When we use the PUT method, the content of the url specifies the name of the object we are creating using the HTTP request. Here we have to send the following −
The name of the database in which we are creating the document.
The document id.
The data of the document. The −d option is used to send the data/document through the HTTP request. While writing a document, simply enter your field-value pairs separated by colons, within curly braces as shown below −
{
   "Name" : "Raju",
   "age" : 23,
   "Designation" : "Designer"
}
Using the above given syntax, if you want to create a document with id 001 in a database with the name my_database, you can create it as shown below.
$ curl -X PUT http://127.0.0.1:5984/my_database/"001" -d
'{ "Name" : "Raju", "age" : 23, "Designation" : "Designer" }'

{"ok":true,"id":"001","rev":"1-1c2fae390fa5475d9b809301bbf3f25e"}
The response of CouchDB to this request contains three fields −
"ok", indicating the operation was successful.
"id", which stores the id of the document.
"rev", which indicates the revision id. Every time you revise (update or modify) a document, a new _rev value is generated by CouchDB. If you want to update or delete a document, CouchDB expects you to include the _rev field of the revision you wish to change. When CouchDB accepts the change, it will generate a new revision number. This mechanism ensures concurrency control.
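In practice this means that a write carrying a stale _rev is rejected with a 409 Conflict. A sketch of what that looks like, using a deliberately wrong revision id −
$ curl -X PUT http://127.0.0.1:5984/my_database/"001" -d '{ "Name" : "Raju", "age" : 24, "_rev" : "1-00000000000000000000000000000000" }'
{"error":"conflict","reason":"Document update conflict."}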
If you want to view the created document, you can get it as shown below.
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "1-1c2fae390fa5475d9b809301bbf3f25e",
   "Name": "Raju",
   "age": 23,
   "Designation": "Designer"
}
To create a document, open the http://127.0.0.1:5984/_utils/ url to get an Overview/index page of CouchDB as shown below.
Select the database in which you want to create the document. Open the Overview page of the database and select the New Document option as shown below.
When you select the New Document option, CouchDB creates a new database document, assigning it a new id. You can edit the value of the id and assign your own value in the form of a string. In the following illustration, we have created a new document with an id 001.
In this page, you can observe three options − Save Document, Add Field and Upload Attachment.
To add a field to the document, click on the Add Field option. After creating a database, you can add a field to it using this option. Clicking on it will get you a pair of text boxes, namely Field and value. You can edit these values by clicking on them. Edit those values and type your desired field-value pair. Click on the green button to save these values.
In the following illustration, we have created three fields, Name, age and Designation, of the employee.
You can save the changes made to the document by clicking on this option. After saving, a new id _rev will be generated as shown below.
You can update a document in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to update a document.
curl -X PUT http://127.0.0.1:5984/database_name/document_id/ -d '{ "field" : "value", "_rev" : "revision id" }'
Suppose there is a document with id 001 in the database named my_database. You can update it as shown below.
First of all, get the revision id of the document that is to be updated. You can find the _rev of the document in the document itself; therefore, get the document as shown below.
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "1-1c2fae390fa5475d9b809301bbf3f25e",
   "age" : "23"
}
Use the revision id _rev from the document to update the document. Here we are updating the age from 23 to 24.
$ curl -X PUT http://127.0.0.1:5984/my_database/001/ -d
'{ "age" : "24", "_rev" : "1-1c2fae390fa5475d9b809301bbf3f25e" }'

{ "ok" : true, "id" : "001", "rev" : "2-04d8eac1680d237ca25b68b36b8899d3" }
To verify the document, get the document again using a GET request as shown below.
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "2-04d8eac1680d237ca25b68b36b8899d3",
   "age" : "24"
}

Following are some important points to be noted while updating a document.
The url we send in the request contains the database name and the document id.
Updating an existing document means replacing the entire document. You cannot add just a field to an existing document. You can only write an entirely new version of the document into the database with the same document ID.
We have to supply the revision number as a part of the JSON request.
In return, the JSON contains the success message, the id of the document being updated, and the new revision information. If you want to update the new version of the document, you have to quote this latest revision number.
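A convenient way to fetch the latest revision number without downloading the whole document is a HEAD request: CouchDB reports the current revision in the ETag response header. A sketch, reusing the document above (other response headers omitted) −
$ curl -I http://127.0.0.1:5984/my_database/001
HTTP/1.1 200 OK
ETag: "2-04d8eac1680d237ca25b68b36b8899d3"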
To update a document, open the http://127.0.0.1:5984/_utils/ url to get an Overview/index page of CouchDB as shown below.
Select the database in which the document to be updated exists and click it. Here we are updating a document in the database named tutorials_point. You will get the list of documents in the database as shown below.
Select the document that you want to update and click on it. You will get the contents of the document as shown below.
Here, to update the location from Delhi to Hyderabad, click on the text box, edit the field, and click the green button to save the changes as shown below.
You can delete a document in CouchDB by sending an HTTP request to the server using the DELETE method through the cURL utility. Following is the syntax to delete a document.
curl -X DELETE http://127.0.0.1:5984/database_name/document_id?rev=rev_id
Using −X, we can specify the custom HTTP request method we are using while communicating with the HTTP server. In this case, we are using the DELETE method. To delete a document, /database_name/document_id alone is not enough. You have to pass the recent revision id through the url. To append parameters to a url, "?" is used.
Suppose there is a document in the database named my_database with the document id 001. To delete this document, you have to get the rev id of the document. Get the document data as shown below.
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "2-04d8eac1680d237ca25b68b36b8899d3",
   "age" : "24"
}
Now specify the revision id of the document to be deleted, the id of the document, and the name of the database the document belongs to, as shown below −
$ curl -X DELETE http://127.0.0.1:5984/my_database/001?rev=2-04d8eac1680d237ca25b68b36b8899d3

{"ok":true,"id":"001","rev":"3-3a561d56de1ce3305d693bd15630bf96"}
To verify whether the document is deleted, try to fetch the document using the GET method. Since you are fetching a deleted document, this will give you an error message as shown below −
$ curl -X GET http://127.0.0.1:5984/my_database/001
{"error":"not_found","reason":"deleted"}

First of all, verify the documents in the database. Following is the snapshot of the database named tutorials_point.
Here you can observe that the database consists of three documents.
To delete any of the documents, say 003, do the following −
Click on the document; you will get a page showing the contents of the selected document in the form of field-value pairs.
This page also contains four options, namely Save Document, Add Field, Upload Attachment, and Delete Document.
Click on the Delete Document option.
You will get a dialog box saying "Are you sure you want to delete this document?" Click on Delete to delete the document.
You can attach files to CouchDB just like email attachments. An attachment carries metadata such as its name, its MIME type, and the number of bytes it contains. To attach files to a document you have to send a PUT request to the server. Following is the syntax to attach a file to a document −
$ curl -vX PUT http://127.0.0.1:5984/database_name/document_id/filename?rev=rev_id --data-binary @filename -H "Content-Type: type of the content"
The request has various options that are explained below.
--data-binary − This option tells cURL to read a file's contents into the HTTP request body.
-H − This option is used to mention the content type of the file we are going to upload.
Let us attach a file named boy.jpg to the document with id 001 in the database named my_database by sending a PUT request to CouchDB. Before that, you have to fetch the data of the document with id 001 to get its current rev id, as shown below.
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "1-967a00dff5e02add41819138abb3284d"
}
Now using the _rev value, send the PUT request to the CouchDB server as shown below.
$ curl -vX PUT http://127.0.0.1:5984/my_database/001/boy.jpg?rev=1-967a00dff5e02add41819138abb3284d --data-binary @boy.jpg -H "Content-Type: image/jpg"
To verify whether the attachment is uploaded, fetch the document content as shown below −
$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "2-4705a219cdcca7c72aac4f623f5c46a8",
   "_attachments": {
      "boy.jpg": {
         "content_type": "image/jpg",
         "revpos": 2,
         "digest": "md5-9Swz8jvmga5mfBIsmCxCtQ==",
         "length": 91408,
         "stub": true
      }
   }
}

Using this option, you can upload a new attachment, such as a file, image, or document, to the database. To do so, click on the Upload Attachment button. A dialog box will appear where you can choose the file to be uploaded. Select the file and click on the Upload button.
The uploaded file will be displayed under the _attachments field. Later you can see the file by clicking on it.
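To read an attachment back, request it under the document's url; the -o flag described earlier saves it to a local file. A minimal sketch, reusing the example above (the output filename is made up) −
$ curl -X GET http://127.0.0.1:5984/my_database/001/boy.jpg -o boy_copy.jpg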
There are three main types of database management systems namely RDBMS (Relational Database management Systems), OLAP (Online Analytical\nProcessing Systems) and NoSQL."},{"code":null,"e":2273,"s":2087,"text":"RDBMS stands for Relational Database Management System. RDBMS is the basis for SQL, and for all modern database systems like MS SQL Server, IBM DB2, Oracle, MySQL, and Microsoft Access."},{"code":null,"e":2427,"s":2273,"text":"A Relational database management system (RDBMS) is a database management system (DBMS) that is based on the relational model as introduced by E. F. Codd."},{"code":null,"e":2609,"s":2427,"text":"The data in RDBMS is stored in database objects called tables. The table is a collection of related data entries and it consists of columns and rows. It stores only structured data."},{"code":null,"e":2831,"s":2609,"text":"Online Analytical Processing Server (OLAP) is based on the multidimensional data model. It allows managers and analysts to get an insight of the information through fast, consistent, and interactive access to information."},{"code":null,"e":3160,"s":2831,"text":"A NoSQL database (sometimes called as Not Only SQL) is a database that provides a mechanism to store and retrieve data other than the tabular relations used in relational databases. These databases are schema-free, support easy replication, have simple API, eventually consistent, and can handle huge amounts of data (big data)."},{"code":null,"e":3229,"s":3160,"text":"The primary objective of a NoSQL database is to have the following −"},{"code":null,"e":3251,"s":3229,"text":"Simplicity of design,"},{"code":null,"e":3275,"s":3251,"text":"Horizontal scaling, and"},{"code":null,"e":3308,"s":3275,"text":"Finer control over availability."},{"code":null,"e":3706,"s":3308,"text":"NoSQL databases use different data structures compared to relational databases. It makes some operations faster in NoSQL. The suitability of a given NoSQL database depends on the problem it must solve. These databases store both structured data and unstructured data like audio files, video files, documents, etc. These NoSQL databases are classified into three types and they are explained below."},{"code":null,"e":3924,"s":3706,"text":"Key-value Store − These databases are designed for storing data in key-value pairs and these databases will not have any schema. In these databases, each data value consists of an indexed key and a value for that key."},{"code":null,"e":3974,"s":3924,"text":"Examples − BerkeleyDB, Cassandra, DynamoDB, Riak."},{"code":null,"e":4178,"s":3974,"text":"Column Store − In these databases, data is stored in cells grouped in columns of data, and these columns are further grouped into Column families. These column families can contain any number of columns."},{"code":null,"e":4222,"s":4178,"text":"Examples − BigTable, HBase, and HyperTable."},{"code":null,"e":4566,"s":4222,"text":"Document Store − These are the databases developed on the basic idea of key-value stores where \"documents\" contain more complex data. Here, each document is assigned a unique key, which is used to retrieve the document. These are designed for storing, retrieving, and managing document-oriented information, also known as semi-structured data."},{"code":null,"e":4598,"s":4566,"text":"Examples − CouchDB and MongoDB."},{"code":null,"e":4765,"s":4598,"text":"CouchDB is an open source database developed by Apache software foundation. The focus is on the ease of use, embracing the web. 
It is a NoSQL document store database."},{"code":null,"e":5046,"s":4765,"text":"It uses JSON, to store data (documents), java script as its query language to transform the documents, http protocol for api to access the documents, query the indices with the web browser. It is a multi master application released in 2005 and it became an apache project in 2008."},{"code":null,"e":5242,"s":5046,"text":"CouchDB have an HTTP-based REST API, which helps to communicate with the database easily. And the simple structure of HTTP resources and methods (GET, PUT, DELETE) are easy to understand and use."},{"code":null,"e":5438,"s":5242,"text":"CouchDB have an HTTP-based REST API, which helps to communicate with the database easily. And the simple structure of HTTP resources and methods (GET, PUT, DELETE) are easy to understand and use."},{"code":null,"e":5556,"s":5438,"text":"As we store data in the flexible document-based structure, there is no need to worry about the structure of the data."},{"code":null,"e":5674,"s":5556,"text":"As we store data in the flexible document-based structure, there is no need to worry about the structure of the data."},{"code":null,"e":5786,"s":5674,"text":"Users are provided with powerful data mapping, which allows querying, combining, and filtering the information."},{"code":null,"e":5898,"s":5786,"text":"Users are provided with powerful data mapping, which allows querying, combining, and filtering the information."},{"code":null,"e":6030,"s":5898,"text":"CouchDB provides easy-to-use replication, using which you can copy, share, and synchronize the data between databases and machines."},{"code":null,"e":6162,"s":6030,"text":"CouchDB provides easy-to-use replication, using which you can copy, share, and synchronize the data between databases and machines."},{"code":null,"e":6225,"s":6162,"text":"Database is the outermost data structure/container in CouchDB."},{"code":null,"e":6288,"s":6225,"text":"Database is the outermost data structure/container in CouchDB."},{"code":null,"e":6344,"s":6288,"text":"Each database is a collection of independent documents."},{"code":null,"e":6400,"s":6344,"text":"Each database is a collection of independent documents."},{"code":null,"e":6464,"s":6400,"text":"Each document maintains its own data and self-contained schema."},{"code":null,"e":6528,"s":6464,"text":"Each document maintains its own data and self-contained schema."},{"code":null,"e":6674,"s":6528,"text":"Document metadata contains revision information, which makes it possible to merge the differences occurred while the databases were disconnected."},{"code":null,"e":6820,"s":6674,"text":"Document metadata contains revision information, which makes it possible to merge the differences occurred while the databases were disconnected."},{"code":null,"e":6934,"s":6820,"text":"CouchDB implements multi version concurrency control, to avoid the need to lock the database field during writes."},{"code":null,"e":7048,"s":6934,"text":"CouchDB implements multi version concurrency control, to avoid the need to lock the database field during writes."},{"code":null,"e":7276,"s":7048,"text":"CouchDB is a document storage NoSQL database. It provides the facility of storing documents with unique names, and it also provides an API called RESTful HTTP API for reading and updating (add, edit, delete) database documents."},{"code":null,"e":7525,"s":7276,"text":"In CouchDB, documents are the primary unit of data and they also include metadata. 
Document fields are uniquely named and contain values of varying types (text, number, Boolean, lists, etc.), and there is no set limit to text size or element count."},{"code":null,"e":7706,"s":7525,"text":"Document updates (add, edit, delete) follow Atomicity, i.e., they will be saved completely or not saved at all. The database will not have any partially saved or edited documents. "},{"code":null,"e":7777,"s":7706,"text":"{\n \"field\" : \"value\",\n \"field\" : \"value\",\n \"field\" : \"value\",\n}\n"},{"code":null,"e":7834,"s":7777,"text":"CouchDB contains ACID properties as one of its features."},{"code":null,"e":8028,"s":7834,"text":"Consistency − When the data in CouchDB was once committed, then this data will not be modified or overwritten. Thus, CouchDB ensures that the database file will always be in a consistent state."},{"code":null,"e":8231,"s":8028,"text":"A multi-Version Concurrency Control (MVCC) model is used by CouchDB reads, because of which the client will see a consistent snapshot of the database from the beginning to the end of the read operation."},{"code":null,"e":8516,"s":8231,"text":"Whenever a documents is updated, CouchDB flushes the data into the disk, and the updated database header is written in two consecutive and identical chunks to make up the first 4k of the file, and then synchronously flushed to disk. Partial updates during the flush will be discarded."},{"code":null,"e":8794,"s":8516,"text":"If the failure occurred while committing the header, a surviving copy of the previous identical headers will remain, ensuring coherency of all previously committed data. Except the header area, consistency checks or fix-ups after a crash or a power failure are never necessary."},{"code":null,"e":9160,"s":8794,"text":"Whenever the space in the database file got wasted above certain extent, all the active data will be copied (cloned) to a new file. When the copying process is entirely done, then the old file will be discarded. All this is done by compaction process. The database remains online during the compaction and all updates and reads are allowed to complete successfully."},{"code":null,"e":9476,"s":9160,"text":"Data in CouchDB is stored in semi-structured documents that are flexible with individual implicit structures, but it is a simple document model for data storage and sharing. If we want see our data in many different ways, we need a way to filter, organize and report on data that hasn’t been decomposed into tables."},{"code":null,"e":9849,"s":9476,"text":"To solve this problem, CouchDB provides a view model. Views are the method of aggregating and reporting on the documents in a database, and are built on-demand to aggregate, join and report on database documents. Because views are built dynamically and don’t affect the underlying document, you can have as many different view representations of the same data as you like."},{"code":null,"e":9901,"s":9849,"text":"CouchDB was written in Erlang programming language."},{"code":null,"e":9940,"s":9901,"text":"It was started by Damien Katz in 2005."},{"code":null,"e":9982,"s":9940,"text":"CouchDB became an Apache project in 2008."},{"code":null,"e":10022,"s":9982,"text":"The current version of CouchDB is 1.61."},{"code":null,"e":10107,"s":10022,"text":"This chapter teaches you how to install CouchDB in windows as well as Linux systems."},{"code":null,"e":10274,"s":10107,"text":"The official website for CouchDB is https://couchdb.apache.org. 
If you click the given link, you can get the home page of the CouchDB official website as shown below."},{"code":null,"e":10445,"s":10274,"text":"If you click on the download button that will lead to a page where download links of CouchDB in various formats are provided. The following snapshot illustrates the same."},{"code":null,"e":10553,"s":10445,"text":"Choose the download link for windows systems and select one of the provided mirrors to start your download."},{"code":null,"e":10713,"s":10553,"text":"CouchDB will be downloaded to your system in the form of setup file named setup-couchdb-1.6.1_R16B02.exe. Run the setup file and proceed with the\ninstallation."},{"code":null,"e":10918,"s":10713,"text":"After installation, open built-in web interface of CouchDB by visiting the following \nlink: http://127.0.0.1:5984/. If everything goes fine, this will give you a web page, which have the following output."},{"code":null,"e":11093,"s":10918,"text":"{\n \"couchdb\":\"Welcome\",\"uuid\":\"c8d48ac61bb497f4692b346e0f400d60\",\n \"version\":\"1.6.1\",\n \"vendor\":{\n \"version\":\"1.6.1\",\"name\":\"The Apache Software Foundation\"\n }\n}"},{"code":null,"e":11170,"s":11093,"text":"You can interact with the CouchDB web interface by using the following url −"},{"code":null,"e":11201,"s":11170,"text":"http://127.0.0.1:5984/_utils/\n"},{"code":null,"e":11280,"s":11201,"text":"This shows you the index page of Futon, which is the web interface of CouchDB."},{"code":null,"e":11402,"s":11280,"text":"For many of the Linux flavored systems, they provide CouchDB internally. To install this CouchDB follow the instructions."},{"code":null,"e":11437,"s":11402,"text":"On Ubuntu and Debian you can use −"},{"code":null,"e":11467,"s":11437,"text":"sudo aptitude install couchdb"},{"code":null,"e":11521,"s":11467,"text":"On Gentoo Linux there is a CouchDB ebuild available −"},{"code":null,"e":11541,"s":11521,"text":"sudo emerge couchdb"},{"code":null,"e":11650,"s":11541,"text":"If your Linux system does not have CouchDB, follow the next section to install CouchDB and its dependencies."},{"code":null,"e":11744,"s":11650,"text":"Following is the list of dependencies that are to be installed to get CouchDB in your system−"},{"code":null,"e":11755,"s":11744,"text":"Erlang OTP"},{"code":null,"e":11759,"s":11755,"text":"ICU"},{"code":null,"e":11767,"s":11759,"text":"OpenSSL"},{"code":null,"e":11788,"s":11767,"text":"Mozilla SpiderMonkey"},{"code":null,"e":11797,"s":11788,"text":"GNU Make"},{"code":null,"e":11821,"s":11797,"text":"GNU Compiler Collection"},{"code":null,"e":11829,"s":11821,"text":"libcurl"},{"code":null,"e":11838,"s":11829,"text":"help2man"},{"code":null,"e":11854,"s":11838,"text":"Python for docs"},{"code":null,"e":11868,"s":11854,"text":"Python Sphinx"},{"code":null,"e":12062,"s":11868,"text":"To install these dependencies, type the following commands in the terminal. 
Here we are using Centos 6.5 and the following commands will install the required softwares compatible to Centos 6.5."},{"code":null,"e":12481,"s":12062,"text":"$sudo yum install autoconf\n$sudo yum install autoconf-archive\n$sudo yum install automake\n$sudo yum install curl-devel\n$sudo yum install erlang-asn1\n$sudo yum install erlang-erts\n$sudo yum install erlang-eunit\n$sudo yum install erlang-os_mon\n$sudo yum install erlang-xmerl\n$sudo yum install help2man\n$sudo yum install js-devel\n$sudo yum install libicu-devel\n$sudo yum install libtool\n$sudo yum install perl-Test-Harness"},{"code":null,"e":12593,"s":12481,"text":"Note − For all these commands you need to use sudo. The following procedure converts a normal user to a sudoer."},{"code":null,"e":12621,"s":12593,"text":"Login as root in admin user"},{"code":null,"e":12649,"s":12621,"text":"Login as root in admin user"},{"code":null,"e":12694,"s":12649,"text":"Open sudo file using the following command −"},{"code":null,"e":12739,"s":12694,"text":"Open sudo file using the following command −"},{"code":null,"e":12746,"s":12739,"text":"visudo"},{"code":null,"e":12822,"s":12746,"text":"Then edit as shown below to give your existing user the sudoer privileges −"},{"code":null,"e":12898,"s":12822,"text":"Hadoop All=(All) All , and press esc : x to write the changes to the file. "},{"code":null,"e":13004,"s":12898,"text":"After downloading all the dependencies in your system, download CouchDB following the given instructions."},{"code":null,"e":13128,"s":13004,"text":"Apache software foundation will not provide the complete .tar file for CouchDB,\nso you have to install it from the source. "},{"code":null,"e":13270,"s":13128,"text":"Create a new directory to install CouchDB, browse to such created directory and download CouchDB source by executing the following commands −"},{"code":null,"e":13432,"s":13270,"text":"$ cd\n$ mkdir CouchDB\n$ cd CouchDB/\n$ wget\nhttp://www.google.com/url?q=http%3A%2F%2Fwww.apache.org%2Fdist%2Fcouchdb%2Fsource%2F1.6.1%2Fapache-couchdb-1.6.1.tar.gz"},{"code":null,"e":13547,"s":13432,"text":"This will download CouchDB source file into your system. Now unzip the apache-couchdb-1.6.1.tar.gz as shown below."},{"code":null,"e":13586,"s":13547,"text":"$ tar zxvf apache-couchdb-1.6.1.tar.gz"},{"code":null,"e":13627,"s":13586,"text":"To configure CouchDB, do the following −"},{"code":null,"e":13665,"s":13627,"text":"Browse to the home folder of CouchDB."},{"code":null,"e":13685,"s":13665,"text":"Login as superuser."},{"code":null,"e":13737,"s":13685,"text":"Configure using ./configure prompt as shown below −"},{"code":null,"e":13837,"s":13737,"text":"$ cd apache-couchdb-1.6.1\n$ su\nPassword:\n# ./configure --with-erlang=/usr/lib64/erlang/usr/include/"},{"code":null,"e":13982,"s":13837,"text":"It gives you the following output similar to that shown below with a concluding\nline saying − You have configured Apache CouchDB, time to relax."},{"code":null,"e":14831,"s":13982,"text":"# ./configure --with-erlang=/usr/lib64/erlang/usr/include/\n\nchecking for a BSD-compatible install... /usr/bin/install -c\nchecking whether build environment is sane... yes\nchecking for a thread-safe mkdir -p... /bin/mkdir -p\nchecking for gawk... gawk\nchecking whether make sets $(MAKE)... yes\nchecking how to create a ustar tar archive... 
gnutar\n.................................................................\n............................\nconfig.status: creating var/Makefile\nconfig.status: creating config.h\nconfig.status: config.h is unchanged\nconfig.status: creating src/snappy/google-snappy/config.h\nconfig.status: src/snappy/google-snappy/config.h is unchanged\nconfig.status: executing depfiles commands\nconfig.status: executing libtool commands\n\nYou have configured Apache CouchDB, time to relax.\n\nRun `make && sudo make install' to install."},{"code":null,"e":14897,"s":14831,"text":"Now type the following command to install CouchDB in your system."},{"code":null,"e":14925,"s":14897,"text":"# make && sudo make install"},{"code":null,"e":15042,"s":14925,"text":"It installs CouchDB in your system with a concluding line saying − You have installed Apache CouchDB, time to relax."},{"code":null,"e":15126,"s":15042,"text":"To start CouchDB, browse to the CouchDB home folder and use the following command −"},{"code":null,"e":15177,"s":15126,"text":"$ cd apache-couchdb-1.6.1\n$ cd etc\n$ couchdb start"},{"code":null,"e":15226,"s":15177,"text":"It starts CouchDB giving the following output: −"},{"code":null,"e":15500,"s":15226,"text":"Apache CouchDB 1.6.1 (LogLevel=info) is starting.\nApache CouchDB has started. Time to relax.\n[info] [lt;0.31.0gt;] Apache CouchDB has started on http://127.0.0.1:5984/\n[info] [lt;0.112.0gt;] 127.0.0.1 - - GET / 200\n[info] [lt;0.112.0gt;] 127.0.0.1 - - GET /favicon.ico 200\n"},{"code":null,"e":15589,"s":15500,"text":"Since CouchDB is a web interface, try to type the following homepage url in the browser."},{"code":null,"e":15612,"s":15589,"text":"http://127.0.0.1:5984/"},{"code":null,"e":15647,"s":15612,"text":"It produces the following output −"},{"code":null,"e":15834,"s":15647,"text":"{\n \"couchdb\":\"Welcome\",\n \"uuid\":\"8f0d59acd0e179f5e9f0075fa1f5e804\",\n \"version\":\"1.6.1\",\n \"vendor\":{\n \"name\":\"The Apache Software Foundation\",\n \"version\":\"1.6.1\"\n }\n}\n"},{"code":null,"e":15885,"s":15834,"text":"cURL utility is a way to communicate with CouchDB."},{"code":null,"e":16260,"s":15885,"text":"It is a tool to transfer data from or to a server, using one of the supported protocols (HTTP, HTTPS, FTP, FTPS, TFTP, DICT, TELNET, LDAP or FILE). The command is designed to work without user interaction. cURL offers a busload of useful tricks like proxy support, user authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file transfer resume and more."},{"code":null,"e":16512,"s":16260,"text":"The cURL utility is available in operating systems such as UNIX, Linux, Mac OS X and Windows. It is a command line utility using which user can access HTTP protocol straight away from the command line. This chapter teaches you how to use cURL utility."},{"code":null,"e":16629,"s":16512,"text":"You can access any website using cURL utility by simply typing cURL followed by the website address as shown below −"},{"code":null,"e":16658,"s":16629,"text":"curl www.tutorialspoint.com/"},{"code":null,"e":16780,"s":16658,"text":"By default, the cURL utility returns the source code of the requested page. It displays this code on the terminal window."},{"code":null,"e":16875,"s":16780,"text":"cURL utility provides various options to work with, and you can see them in cURL utility help."},{"code":null,"e":16927,"s":16875,"text":"The following code shows some portion of cURL help."},{"code":null,"e":19487,"s":16927,"text":"$ curl --help\nUsage: curl [options...] 
The cURL utility is a way to communicate with CouchDB.

It is a tool to transfer data from or to a server, using one of the supported protocols (HTTP, HTTPS, FTP, FTPS, TFTP, DICT, TELNET, LDAP, or FILE). The command is designed to work without user interaction. cURL offers a busload of useful tricks like proxy support, user authentication, FTP upload, HTTP post, SSL (https:) connections, cookies, file transfer resume, and more.

The cURL utility is available in operating systems such as UNIX, Linux, Mac OS X, and Windows. It is a command-line utility with which a user can access the HTTP protocol straight from the command line. This chapter teaches you how to use the cURL utility.

You can access any website using the cURL utility by simply typing curl followed by the website address as shown below −

curl www.tutorialspoint.com/

By default, the cURL utility returns the source code of the requested page. It displays this code on the terminal window.

The cURL utility provides various options to work with, and you can see them in the cURL utility help.

The following code shows some portion of the cURL help.

$ curl --help
Usage: curl [options...] <url>
Options: (H) means HTTP/HTTPS only, (F) means FTP only
    --anyauth        Pick "any" authentication method (H)
 -a/--append         Append to target file when uploading (F/SFTP)
    --basic          Use HTTP Basic Authentication (H)
    --cacert <file>  CA certificate to verify peer against (SSL)
 -d/--data <data>    HTTP POST data (H)
    --data-ascii <data>    HTTP POST ASCII data (H)
    --data-binary <data>   HTTP POST binary data (H)
    --data-urlencode <name=data>  HTTP POST data urlencoded (H)
    --delegation STRING    GSS-API delegation permission
    --digest         Use HTTP Digest Authentication (H)
    --disable-eprt   Inhibit using EPRT or LPRT (F)
    --disable-epsv   Inhibit using EPSV (F)

 -F/--form <name=content>     Specify HTTP multipart POST data (H)
    --form-string <name=string>  Specify HTTP multipart POST data (H)
    --ftp-account <data>   Account data to send when requested by server (F)
    --ftp-alternative-to-user <cmd>  String to replace "USER [name]" (F)
    --ftp-create-dirs      Create the remote dirs if not present (F)
    --ftp-method [multicwd/nocwd/singlecwd]  Control CWD usage (F)
    --ftp-pasv             Use PASV/EPSV instead of PORT (F)

 -G/--get            Send the -d data with a HTTP GET (H)

 -H/--header <line>  Custom header to pass to server (H)
 -I/--head           Show document info only
 -h/--help           This help text
    --hostpubmd5 <md5>  Hex encoded MD5 string of the host public key (SSH)
 -0/--http1.0        Use HTTP 1.0 (H)
    --ignore-content-length  Ignore the HTTP Content-Length header
 -i/--include        Include protocol headers in the output (H/F)

 -M/--manual         Display the full manual

 -o/--output <file>  Write output to <file> instead of stdout
    --pass <phrase>  Pass phrase for the private key (SSL/SSH)
    --post301        Do not switch to GET after following a 301 redirect (H)
    --post302        Do not switch to GET after following a 302 redirect (H)
 -O/--remote-name    Write output to a file named as the remote file
    --remote-name-all   Use the remote file name for all URLs
 -R/--remote-time    Set the remote file's time on the local output
 -X/--request <command>  Specify request command to use
    --retry <num>    Retry request <num> times if transient problems occur
    --retry-delay <seconds>     When retrying, wait this many seconds between each
    --retry-max-time <seconds>  Retry only within this period
 -T/--upload-file <file>  Transfer <file> to remote site
    --url <URL>      Set URL to work with
 -B/--use-ascii      Use ASCII/text transfer

While communicating with CouchDB, certain options of the cURL utility are used extensively. Following are brief descriptions of some important options of the cURL utility, including those used by CouchDB.

-X/--request −

(HTTP) Specifies a custom request method to be used when communicating with the HTTP server. The specified request method is used instead of the method otherwise used (which defaults to GET). Read the HTTP 1.1 specification for details and explanations.

(FTP) Specifies a custom FTP command to use instead of LIST when doing file lists with FTP.

-H/--header −

(HTTP) Specifies an extra header to use when getting a web page. Note that if you add a custom header that has the same name as one of the internal headers cURL would use, your externally set header will be used instead of the internal one. This allows you to do even trickier work than cURL would normally do. You should not replace internally set headers without knowing perfectly well what you are doing. Replacing an internal header with one that has no content on the right side of the colon will prevent that header from appearing at all.

cURL assures that each header you add or replace is sent with the proper end-of-line marker. You should therefore neither add that marker as a part of the header content nor add newlines or carriage returns, as they would only disorder things.

See also the -A/--user-agent and -e/--referer options.

This option can be used multiple times to add/replace/remove multiple headers.
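To make the two options above concrete, here is a minimal sketch against a local CouchDB instance (assuming CouchDB is running on 127.0.0.1:5984; the Accept header and url are illustrative, not mandated by the tutorial) −

# -X chooses the request method, -H adds a header asking for JSON explicitly
curl -X GET -H "Accept: application/json" http://127.0.0.1:5984/_all_dbs

With the Accept header present, CouchDB labels its reply application/json instead of the default text/plain.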
-d/--data −

Using this flag of cURL, you can send data along with the HTTP POST request to the server, as if it had been filled in a form by the user and submitted.

Example

Suppose there is a website and you want to log into it or send some data to it using the -d flag of the cURL utility as shown below.

curl -X PUT http://mywebsite.com/login.html -d userid=001 -d password=tutorialspoint

It sends a post chunk that looks like "userid=001&password=tutorialspoint". Likewise, you can also send documents (JSON) using the -d flag.
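For example, a JSON document can be sent the same way. This is a minimal sketch, assuming a database named my_database already exists on the local instance; POSTing a JSON body to a database stores it as a new document −

curl -X POST http://127.0.0.1:5984/my_database -H "Content-Type: application/json" -d '{"name" : "sample"}'

CouchDB responds with the id and revision it assigned to the stored document.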
-o/--output −

Using this flag, cURL writes the output of the request to a file.

Example

The following example shows the use of the -o flag of the cURL utility.

$ curl -o example.html www.tutorialspoint.com/index.htm
% Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                               Dload  Upload   Total   Spent    Left  Speed
100 81193    0 81193    0     0  48168      0 --:--:--  0:00:01 --:--:-- 58077

This gets the source code of the homepage of tutorialspoint.com and saves the output in a file named example.html.

-O/--remote-name −

This flag is similar to -o; the only difference is that with this flag, a new file with the same name as the requested url is created, and the source code of the requested url is copied into it.

Example

The following example shows the use of the -O flag of the cURL utility.

$ curl -O www.tutorialspoint.com/index.htm
% Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                               Dload  Upload   Total   Spent    Left  Speed
100 81285    0 81285    0     0  49794      0 --:--:--  0:00:01 --:--:-- 60077

It creates a new file with the name index.htm and saves the source code of the index page of tutorialspoint.com in it.

You can access the homepage of CouchDB by sending a GET request to the installed CouchDB instance. First of all, make sure you have installed CouchDB in your Linux environment and that it is running successfully, and then use the following syntax to send a GET request to the CouchDB instance.

curl http://127.0.0.1:5984/

This gives you a JSON document as shown below, in which CouchDB specifies details such as the version number, the name of the vendor, and the version of the software.

$ curl http://127.0.0.1:5984/
{
   "couchdb" : "Welcome",
   "uuid" : "8f0d59acd0e179f5e9f0075fa1f5e804",
   "version" : "1.6.1",
   "vendor" : {
      "name":"The Apache Software Foundation",
      "version":"1.6.1"
   }
}

You can get the list of all the databases created by sending a GET request for the path "_all_dbs". Following is the syntax to get the list of all databases in CouchDB.

curl -X GET http://127.0.0.1:5984/_all_dbs

It gives you the list of all databases in CouchDB as shown below.

$ curl -X GET http://127.0.0.1:5984/_all_dbs
[ "_replicator", "_users" ]

You can create a database in CouchDB using cURL with a PUT request, using the following syntax −

$ curl -X PUT http://127.0.0.1:5984/database_name

As an example, using the above given syntax, create a database with the name my_database as shown below.

$ curl -X PUT http://127.0.0.1:5984/my_database
{"ok":true}

Verify whether the database is created by listing out all the databases as shown below. Here you can observe the name of the newly created database, "my_database", in the list.

$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users", "my_database" ]

You can get information about a database using a GET request with the database name. Following is the syntax to get the database information −

curl -X GET http://127.0.0.1:5984/database_name

As an example, let us get the information of the database named my_database as shown below. Here you get the information about your database as a response.

$ curl -X GET http://127.0.0.1:5984/my_database

{
   "db_name" : "my_database",
   "doc_count" : 0,
   "doc_del_count" : 0,
   "update_seq" : 0,
   "purge_seq" : 0,
   "compact_running" : false,
   "disk_size" : 79,
   "data_size" : 0,
   "instance_start_time" : "1423628520835029",
   "disk_format_version" : 6,
   "committed_update_seq" : 0
}
Futon is the built-in, web-based administration interface of CouchDB. It provides a simple graphical interface with which you can interact with CouchDB. It is a native interface and it provides full access to all CouchDB features. Following is the list of those features −

Creates databases.

Destroys databases.

Creates documents.

Updates documents.

Edits documents.

Deletes documents.

Make sure CouchDB is running, and then open the following url in the browser −

http://127.0.0.1:5984/_utils/

If you open this url, it displays the Futon home page as shown below −

On the left-hand side of this page, you can observe the list of all the current databases of CouchDB. In this illustration, we have a database named my_database, along with the system-defined databases _replicator and _users.

On the right-hand side, you can see the following −

Tools − In this section you can find Configuration to configure CouchDB, Replicator to perform replications, and Status to verify the status of CouchDB and recent modifications done on CouchDB.

Documentation − This section contains the complete documentation for the recent version of CouchDB.

Diagnostics − Under this you can verify the installation of CouchDB.

Recent Databases − Under this you can find the names of recently added databases.
Using HTTP requests, you can communicate with CouchDB. Through these requests we can retrieve data from the database, store data into the database in the form of documents, and view as well as format the documents stored in the database.

While communicating with the database, we will use different request methods like GET, HEAD, POST, PUT, DELETE, and COPY. For all operations in CouchDB, the input data and the output data structures will be in the form of JavaScript Object Notation (JSON) objects.

Following are the different request methods of the HTTP protocol used to communicate with CouchDB, with examples sketched after the list.

GET − This method is used to get a specific item. To get different items, you have to send specific url patterns. In CouchDB, using this GET request we can get static items, database documents, and configuration and statistical information, in the form of JSON documents (in most cases).

HEAD − The HEAD method is used to get the HTTP header of a GET request without the body of the response.

POST − The POST request is used to upload data. In CouchDB, using a POST request you can set values, upload documents, set document values, and also start certain administration commands.

PUT − Using a PUT request, you can create new objects, databases, documents, views, and design documents.

DELETE − Using a DELETE request, you can delete documents, views, and design documents.

COPY − Using the COPY method, you can copy documents and objects.
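As minimal sketches of the two less familiar methods (assuming the database my_database and a document with id 001 from the later examples exist): -I makes cURL send a HEAD request, and COPY takes its target id in a Destination header −

# HEAD − fetch only the response headers of a database
curl -I http://127.0.0.1:5984/my_database

# COPY − duplicate document 001 under the new id 001_copy
curl -X COPY http://127.0.0.1:5984/my_database/001 -H "Destination: 001_copy"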
HTTP headers should be supplied to get the right format and encoding. While sending a request to the CouchDB server, you can send HTTP request headers along with the request. Following are the different HTTP request headers.

Content-Type − This header is used to specify the content type of the data that we supply to the server along with the request. Mostly the type of the content we send along with the request will be a MIME type or JSON (application/json). Using Content-Type on a request is highly recommended.

Accept − This header is used to specify to the server the list of data types that the client can understand, so that the server will send its response using those data types. Generally, here you can send the list of MIME data types the client accepts, separated by commas.

Though using Accept in queries of CouchDB is not required, it is highly recommended to ensure that the data returned can be processed by the client.
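A sketch combining both request headers (the document id 002 and its body are hypothetical, chosen only for illustration) −

curl -X PUT http://127.0.0.1:5984/my_database/002 \
   -H "Content-Type: application/json" \
   -H "Accept: application/json" \
   -d '{"Name" : "Mary"}'

Content-Type declares that the body being sent is JSON; Accept asks CouchDB to label its reply as JSON as well.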
These are the headers of the response sent by the server. These headers give information about the content sent by the server as a response.

Content-Type − This header specifies the MIME type of the data returned by the server. For most requests, the returned MIME type is text/plain.

Cache-Control − This header suggests to the client how to treat the information sent by the server. CouchDB mostly returns must-revalidate, which indicates that the information should be revalidated if possible.

Content-Length − This header returns the length of the content sent by the server, in bytes.

ETag − This header is used to show the revision of a document, or of a view.

Following is a list of the status codes sent in the HTTP response, with a description of each; a request that displays them follows the list.

200 − OK. This status will be issued when a request is completed successfully.

201 − Created. This status will be issued when a document is created.

202 − Accepted. This status will be issued when a request is accepted.

404 − Not Found. This status will be issued when the server is unable to find the requested content.

405 − Resource Not Allowed. This status is issued when the HTTP request type used is invalid.

409 − Conflict. This status is issued whenever there is an update conflict.

415 − Bad Content Type. This status indicates that the requested content type is not supported by the server.

500 − Internal Server Error. This status is issued whenever the data sent in the request is invalid.
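You can see the status line and the response headers described above by adding -i to any request. A sketch against the welcome url follows; the exact header values (server build, date, length) will vary with your installation −

$ curl -i http://127.0.0.1:5984/
HTTP/1.1 200 OK
Server: CouchDB/1.6.1 (Erlang OTP)
Date: (current date)
Content-Type: text/plain; charset=utf-8
Content-Length: 179
Cache-Control: must-revalidate

{"couchdb":"Welcome","uuid":"8f0d59acd0e179f5e9f0075fa1f5e804", ...}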
There are certain url paths with which you can interact with the database directly. Following is the format of such url paths; a sketch of the last two paths follows the list.

PUT /db − This url is used to create a new database.

GET /db − This url is used to get the information about the existing database.

PUT /db/document − This url is used to create a document / update an existing document.

GET /db/document − This url is used to get the document.

DELETE /db/document − This url is used to delete the specified document from the specified database.

GET /db/_design/design-doc − This url is used to get the definition of a design document.

GET /db/_design/design-doc/_view/view-name − This url is used to access the view named view-name in the design document design-doc of the specified database.
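As a sketch of the design-document paths (the design document app and the view by_name are hypothetical; a design document defining that view would have to exist in my_database first) −

# Fetch the definition of a design document
curl -X GET http://127.0.0.1:5984/my_database/_design/app

# Query the view it defines
curl -X GET http://127.0.0.1:5984/my_database/_design/app/_view/by_name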
Database is the outermost data structure in CouchDB, where your documents are stored. You can create these databases using the cURL utility provided by CouchDB, as well as Futon, the web interface of CouchDB.

You can create a database in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to create a database −

$ curl -X PUT http://127.0.0.1:5984/database_name

Using −X we can specify a custom HTTP request method to be used. In this case, we are using the PUT method. When we use the PUT operation/method, the content of the url specifies the name of the object we are creating using the HTTP request. Here we have to send the name of the database in the url of a PUT request to create a database.

Using the above given syntax, if you want to create a database with the name my_database, you can create it as follows −

curl -X PUT http://127.0.0.1:5984/my_database
{
   "ok":true
}

As a response, the server will return a JSON document with the content "ok" − true, indicating that the operation was successful.

Verify whether the database is created by listing out all the databases as shown below. Here you can observe the name of the newly created database, "my_database", in the list.

$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users", "my_database" ]

To create a database via Futon, open http://127.0.0.1:5984/_utils/. You will get an Overview/index page of CouchDB as shown below.

In this page, you can see the list of databases in CouchDB, and an option button Create Database on the left-hand side.

Now click on the Create Database link. You can see a popup window Create New Database asking for the database name for the new database. Choose any name following the mentioned criteria. Here we are creating another database with the name tutorials_point. Click on the Create button as shown in the following screenshot.

You can delete a database in CouchDB by sending a request to the server using the DELETE method through the cURL utility. Following is the syntax to delete a database −

$ curl -X DELETE http://127.0.0.1:5984/database_name

Using −X we specify the custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using the DELETE method. Send the url to the server, specifying the database to be deleted in it.

Assume there is a database named my_database2 in CouchDB. Using the above given syntax, if you want to delete it, you can do it as follows −

$ curl -X DELETE http://127.0.0.1:5984/my_database2
{
   "ok" : true
}

As a response, the server will return a JSON document with the content "ok" − true, indicating that the operation was successful.

Verify whether the database is deleted by listing out all the databases as shown below. Here you can observe that the name of the deleted database, "my_database2", is no longer in the list.

$ curl -X GET http://127.0.0.1:5984/_all_dbs

[ "_replicator", "_users", "my_database" ]

To delete a database via Futon, open the http://127.0.0.1:5984/_utils/ url, where you will get an Overview/index page of CouchDB as shown below.

Here you can see three user-created databases. Let us delete the database named tutorials_point2. To delete a database, select one from the list of databases and click on it. This will lead to the overview page of the selected database, where you can see the various operations on databases. The following screenshot shows the same −

Among them you can find the Delete Database option. By clicking on it you will get a popup window asking whether you are sure. Click on Delete, to delete the selected database.
Documents are CouchDB's central data structure. The contents of the database are stored in the form of documents instead of tables. You can create these documents using the cURL utility provided by CouchDB, as well as Futon. This chapter covers the ways to create a document in a database.

Each document in CouchDB has a unique ID. You can choose your own ID, which should be in the form of a string. Generally, a UUID (Universally Unique IDentifier) is used, which is a random number that has the least chance of creating a duplicate. These are preferred to avoid collisions.

You can create a document in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to create a document.

$ curl -X PUT http://127.0.0.1:5984/database_name/"id" -d '{ document }'

Using −X, we can specify a custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using the PUT method. When we use the PUT method, the content of the url specifies the name of the object we are creating using the HTTP request. Here we have to send the following −

The name of the database in which we are creating the document.

The document id.

The data of the document. The −d option is used to send the data/document through the HTTP request. While writing a document, simply enter your field-value pairs separated by colons, within curly braces, as shown below −

{
   "Name" : "Raju",
   "age" : 23,
   "Designation" : "Designer"
}

Using the above given syntax, if you want to create a document with id 001 in a database with the name my_database, you can create it as shown below.

$ curl -X PUT http://127.0.0.1:5984/my_database/"001" -d
'{ "Name" : "Raju", "age" : 23, "Designation" : "Designer" }'

{"ok":true,"id":"001","rev":"1-1c2fae390fa5475d9b809301bbf3f25e"}

The response of CouchDB to this request contains three fields −

"ok", indicating the operation was successful.

"id", which stores the id of the document, and

"rev", which indicates the revision id. Every time you revise (update or modify) a document, a new _rev value is generated by CouchDB. If you want to update or delete a document, CouchDB expects you to include the _rev field of the revision you wish to change. When CouchDB accepts the change, it will generate a new revision number. This mechanism ensures concurrency control; the sketch after this list shows it rejecting a write deliberately.
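A minimal sketch of that concurrency control, reusing the document 001 created above: sending an update without the current _rev makes CouchDB reject it with a 409 Conflict −

$ curl -X PUT http://127.0.0.1:5984/my_database/"001" -d '{ "age" : 24 }'
{"error":"conflict","reason":"Document update conflict."}

Including the latest _rev in the body, as shown in the update section below, is what makes the write acceptable.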
If you want to view the created document, you can get it using a GET request as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "1-1c2fae390fa5475d9b809301bbf3f25e",
   "Name": "Raju",
   "age": 23,
   "Designation": "Designer"
}

To create a document via Futon, open the http://127.0.0.1:5984/_utils/ url to get an Overview/index page of CouchDB as shown below.

Select the database in which you want to create the document. Open the Overview page of the database and select the New Document option as shown below.

When you select the New Document option, CouchDB creates a new database document, assigning it a new id. You can edit the value of the id and assign your own value in the form of a string. In the following illustration, we have created a new document with an id 001.

In this page, you can observe three options − Save Document, Add Field and Upload Attachment.

To add a field to the document, click on the Add Field option. After creating a document, you can add a field to it using this option. Clicking on it will get you a pair of text boxes, namely Field and value. You can edit these values by clicking on them. Edit those values and type your desired field-value pair. Click on the green button to save these values.

In the following illustration, we have created three fields Name, age and Designation of the employee.

You can save the changes made to the document by clicking on the Save Document option. After saving, a new revision id _rev will be generated as shown below.

You can update a document in CouchDB by sending an HTTP request to the server using the PUT method through the cURL utility. Following is the syntax to update a document.

curl -X PUT http://127.0.0.1:5984/database_name/document_id/ -d '{ "field" : "value", "_rev" : "revision id" }'

Suppose there is a document with id 001 in the database named my_database. You can update it as shown below.

First of all, get the revision id of the document that is to be updated. You can find the _rev of the document in the document itself; therefore, get the document as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "1-1c2fae390fa5475d9b809301bbf3f25e",
   "Name" : "Raju",
   "age" : 23,
   "Designation" : "Designer"
}

Use the revision id _rev from the document to update the document. Here we are updating the age from 23 to 24.

$ curl -X PUT http://127.0.0.1:5984/my_database/001/ -d
'{ "age" : 24, "_rev" : "1-1c2fae390fa5475d9b809301bbf3f25e" }'

{ "ok" : true, "id" : "001", "rev" : "2-04d8eac1680d237ca25b68b36b8899d3" }

To verify the document, get the document again using a GET request as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "2-04d8eac1680d237ca25b68b36b8899d3",
   "age" : 24
}

Following are some important points to be noted while updating a document.

The url we send in the request contains the database name and the document id.

Updating an existing document is the same as updating the entire document. You cannot add just one field to an existing document. You can only write an entirely new version of the document into the database with the same document ID.

We have to supply the revision number as a part of the JSON request.

In return, the JSON contains the success message, the id of the document being updated, and the new revision information. If you want to update the new version of the document, you have to quote this latest revision number.
To update a document via Futon, open the http://127.0.0.1:5984/_utils/ url to get an Overview/index page of CouchDB as shown below.

Select the database in which the document to be updated exists and click it. Here we are updating a document in the database named tutorials_point. You will get the list of documents in the database as shown below.

Select a document that you want to update and click on it. You will get the contents of the document as shown below.

Here, to update the location from Delhi to Hyderabad, click on the text box, edit the field, and click the green button to save the changes as shown below.

You can delete a document in CouchDB by sending an HTTP request to the server using the DELETE method through the cURL utility. Following is the syntax to delete a document −

curl -X DELETE http://127.0.0.1:5984/database_name/document_id?rev=rev_id

Using −X, we specify the custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using the DELETE method. Sending only /database_name/document_id is not enough: you also have to pass the recent revision id through the url. Query parameters of the url are appended after "?".
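Since the current revision is needed for every delete (and update), here is a small side sketch: a HEAD request returns the revision in the ETag response header without transferring the whole document. The id 001 is the document from the examples above; other headers are omitted −

$ curl -I http://127.0.0.1:5984/my_database/001
HTTP/1.1 200 OK
ETag: "2-04d8eac1680d237ca25b68b36b8899d3"
...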
Suppose there is a document in the database named my_database with the document id 001. To delete this document, you have to get the revision id of the document. Get the document data as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id" : "001",
   "_rev" : "2-04d8eac1680d237ca25b68b36b8899d3",
   "age" : 24
}

Now specify the revision id of the document to be deleted, the id of the document, and the name of the database the document belongs to, as shown below −

$ curl -X DELETE http://127.0.0.1:5984/my_database/001?rev=2-04d8eac1680d237ca25b68b36b8899d3

{"ok":true,"id":"001","rev":"3-3a561d56de1ce3305d693bd15630bf96"}

To verify whether the document is deleted, try to fetch the document by using the GET method. Since you are fetching a deleted document, this will give you an error message as shown below −

$ curl -X GET http://127.0.0.1:5984/my_database/001
{"error":"not_found","reason":"deleted"}

To delete a document via Futon, first of all verify the documents in the database. Following is the snapshot of the database named tutorials_point.

Here you can observe that the database consists of three documents. To delete any of the documents, say 003, do the following −

Click on the document; you will get a page showing the contents of the selected document in the form of field-value pairs.

This page also contains four options, namely Save Document, Add Field, Upload Attachment, and Delete Document.

Click on the Delete Document option.

You will get a dialog box saying "Are you sure you want to delete this document?" Click on Delete, to delete the document.
You can attach files to CouchDB documents, much like email attachments. The attachment carries metadata such as its name, its MIME type, and the number of bytes it contains. To attach files to a document, you have to send a PUT request to the server. Following is the syntax to attach a file to a document −

$ curl -vX PUT http://127.0.0.1:5984/database_name/document_id/filename?rev=document_rev_id \
--data-binary @filename -H "Content-Type: type of the content"

The request has various options that are explained below.

--data-binary @filename − This option tells cURL to read a file's contents into the HTTP request body.

-H − This option is used to mention the content type of the file we are going to upload.

Let us attach a file named boy.jpg to the document with id 001 in the database named my_database, by sending a PUT request to CouchDB. Before that, you have to fetch the data of the document with id 001 to get its current rev id, as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "1-967a00dff5e02add41819138abb3284d"
}

Now using the _rev value, send the PUT request to the CouchDB server as shown below.

$ curl -vX PUT http://127.0.0.1:5984/my_database/001/boy.jpg?rev=1-967a00dff5e02add41819138abb3284d \
--data-binary @boy.jpg -H "Content-Type: image/jpg"

To verify whether the attachment is uploaded, fetch the document content as shown below −

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "2-4705a219cdcca7c72aac4f623f5c46a8",
   "_attachments": {
      "boy.jpg": {
         "content_type": "image/jpg",
         "revpos": 2,
         "digest": "md5-9Swz8jvmga5mfBIsmCxCtQ==",
         "length": 91408,
         "stub": true
      }
   }
}

Using the Upload Attachment option in Futon, you can upload a new attachment, such as a file, image, or document, to the database. To do so, click on the Upload Attachment button. A dialog box will appear where you can choose the file to be uploaded. Select the file and click on the Upload button.

The file uploaded will be displayed under the _attachments field. Later you can see the file by clicking on it.
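To read an attachment back out of the database, a GET request on the attachment path returns the raw file bytes. A minimal sketch, reusing boy.jpg as uploaded above; the output filename boy_copy.jpg is arbitrary −

$ curl -X GET http://127.0.0.1:5984/my_database/001/boy.jpg -o boy_copy.jpg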
Time to relax.\\n[info] [lt;0.31.0gt;] Apache CouchDB has started on http://127.0.0.1:5984/\\n[info] [lt;0.112.0gt;] 127.0.0.1 - - GET / 200\\n[info] [lt;0.112.0gt;] 127.0.0.1 - - GET /favicon.ico 200\\n\"\n },\n {\n \"code\": null,\n \"e\": 15589,\n \"s\": 15500,\n \"text\": \"Since CouchDB is a web interface, try to type the following homepage url in the browser.\"\n },\n {\n \"code\": null,\n \"e\": 15612,\n \"s\": 15589,\n \"text\": \"http://127.0.0.1:5984/\"\n },\n {\n \"code\": null,\n \"e\": 15647,\n \"s\": 15612,\n \"text\": \"It produces the following output −\"\n },\n {\n \"code\": null,\n \"e\": 15834,\n \"s\": 15647,\n \"text\": \"{\\n \\\"couchdb\\\":\\\"Welcome\\\",\\n \\\"uuid\\\":\\\"8f0d59acd0e179f5e9f0075fa1f5e804\\\",\\n \\\"version\\\":\\\"1.6.1\\\",\\n \\\"vendor\\\":{\\n \\\"name\\\":\\\"The Apache Software Foundation\\\",\\n \\\"version\\\":\\\"1.6.1\\\"\\n }\\n}\\n\"\n },\n {\n \"code\": null,\n \"e\": 15885,\n \"s\": 15834,\n \"text\": \"cURL utility is a way to communicate with CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 16260,\n \"s\": 15885,\n \"text\": \"It is a tool to transfer data from or to a server, using one of the supported protocols (HTTP, HTTPS, FTP, FTPS, TFTP, DICT, TELNET, LDAP or FILE). The command is designed to work without user interaction. cURL offers a busload of useful tricks like proxy support, user authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file transfer resume and more.\"\n },\n {\n \"code\": null,\n \"e\": 16512,\n \"s\": 16260,\n \"text\": \"The cURL utility is available in operating systems such as UNIX, Linux, Mac OS X and Windows. It is a command line utility using which user can access HTTP protocol straight away from the command line. This chapter teaches you how to use cURL utility.\"\n },\n {\n \"code\": null,\n \"e\": 16629,\n \"s\": 16512,\n \"text\": \"You can access any website using cURL utility by simply typing cURL followed by the website address as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 16658,\n \"s\": 16629,\n \"text\": \"curl www.tutorialspoint.com/\"\n },\n {\n \"code\": null,\n \"e\": 16780,\n \"s\": 16658,\n \"text\": \"By default, the cURL utility returns the source code of the requested page. It displays this code on the terminal window.\"\n },\n {\n \"code\": null,\n \"e\": 16875,\n \"s\": 16780,\n \"text\": \"cURL utility provides various options to work with, and you can see them in cURL utility help.\"\n },\n {\n \"code\": null,\n \"e\": 16927,\n \"s\": 16875,\n \"text\": \"The following code shows some portion of cURL help.\"\n },\n {\n \"code\": null,\n \"e\": 19487,\n \"s\": 16927,\n \"text\": \"$ curl --help\\nUsage: curl [options...] 
\\nOptions: (H) means HTTP/HTTPS only, (F) means FTP only\\n --anyauth Pick \\\"any\\\" authentication method (H)\\n -a/--append Append to target file when uploading (F/SFTP)\\n --basic Use HTTP Basic Authentication (H)\\n --cacert CA certificate to verify peer against (SSL)\\n-d/--data HTTP POST data (H)\\n --data-ascii HTTP POST ASCII data (H)\\n --data-binary HTTP POST binary data (H)\\n --data-urlencode HTTP POST data\\nurlencoded (H)\\n --delegation STRING GSS-API delegation permission\\n --digest Use HTTP Digest Authentication (H)\\n --disable-eprt Inhibit using EPRT or LPRT (F)\\n --disable-epsv Inhibit using EPSV (F)\\n\\n -F/--form Specify HTTP multipart POST data (H)\\n --form-string Specify HTTP multipart POST data (H)\\n --ftp-account Account data to send when requested by server\\n(F)\\n --ftp-alternative-to-user String to replace \\\"USER [name]\\\" (F)\\n --ftp-create-dirs Create the remote dirs if not present (F)\\n --ftp-method [multi cwd/no cwd/single cwd] Control CWD usage (F)\\n --ftp-pasv Use PASV/EPSV instead of PORT (F)\\n\\n -G/--get Send the -d data with a HTTP GET (H)\\n\\n -H/--header Custom header to pass to server (H)\\n -I/--head Show document info only\\n -h/--help This help text\\n --hostpubmd5 Hex encoded MD5 string of the host public key.\\n(SSH)\\n -0/--http1.0 Use HTTP 1.0 (H)\\n --ignore-content-length Ignore the HTTP Content-Length header\\n -i/--include Include protocol headers in the output (H/F)\\n\\n -M/--manual Display the full manual\\n\\n -o/--output Write output to instead of stdout\\n --pass Pass phrase for the private key (SSL/SSH)\\n --post301 Do not switch to GET after following a 301\\nredirect (H)\\n --post302 Do not switch to GET after following a 302\\nredirect (H)\\n -O/--remote-name Write output to a file named as the remote file\\n --remote-name-all Use the remote file name for all URLs\\n -R/--remote-time Set the remote file's time on the local output\\n -X/--request Specify request command to use\\n --retry Retry request times if transient problems\\noccur\\n --retry-delay When retrying, wait this many seconds\\nbetween each\\n --retry-max-time Retry only within this period\\n -T/--upload-file Transfer to remote site\\n --url Set URL to work with\\n -B/--use-ascii Use ASCII/text transfer\"\n },\n {\n \"code\": null,\n \"e\": 19688,\n \"s\": 19487,\n \"text\": \"While communicating with CouchDB, certain options of cURL utility were extensively used. Following are the brief descriptions of some important options of cURL utility including those used by CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 19929,\n \"s\": 19688,\n \"text\": \"(HTTP) Specifies a custom request method used when communicating with the HTTP server. The specified request is used instead of the method otherwise used (which defaults to GET). Read the HTTP 1.1 specification for details and explanations.\"\n },\n {\n \"code\": null,\n \"e\": 20021,\n \"s\": 19929,\n \"text\": \"(FTP) Specifies a custom FTP command to use instead of LIST when doing file lists with ftp.\"\n },\n {\n \"code\": null,\n \"e\": 20541,\n \"s\": 20021,\n \"text\": \"(HTTP) Extra header is used when getting a web page. Note that if you add a custom header that has the same name as one of the internal ones cURL would\\nuse, your externally set header will be used instead of the internal one. This allows you to make even trickier work than cURL would normally do. You should not replace internally set headers without perfectly knowing what you’re doing. 
Replacing an internal header with the one without content on the right side of the colon, will prevent that header from appearing.\"\n },\n {\n \"code\": null,\n \"e\": 20749,\n \"s\": 20541,\n \"text\": \"cURL assures that each header you add/replace get sent with the proper end of line marker. Neither you should add that as a part of the header content nor add newlines or carriage returns to disorder things.\"\n },\n {\n \"code\": null,\n \"e\": 20804,\n \"s\": 20749,\n \"text\": \"See also the -A/--user-agent and -e/--referer options.\"\n },\n {\n \"code\": null,\n \"e\": 20883,\n \"s\": 20804,\n \"text\": \"This option can be used multiple times to add/replace/remove multiple headers.\"\n },\n {\n \"code\": null,\n \"e\": 21033,\n \"s\": 20883,\n \"text\": \"Using this flag of cURL, you can send data along with the HTTP POST request to the server, as if it was filled by the user in the form and submitted.\"\n },\n {\n \"code\": null,\n \"e\": 21041,\n \"s\": 21033,\n \"text\": \"Example\"\n },\n {\n \"code\": null,\n \"e\": 21177,\n \"s\": 21041,\n \"text\": \"Suppose there is a website and you want to login into it or send some data to the website using –d flag of cURL utility as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 21262,\n \"s\": 21177,\n \"text\": \"curl -X PUT http://mywebsite.com/login.html -d userid=001 -d password=tutorialspoint\"\n },\n {\n \"code\": null,\n \"e\": 21398,\n \"s\": 21262,\n \"text\": \"It sends a post chunk that looks like \\\"userid=001&password=tutorialspoint\\\". Likewise you can also send documents (JSON ) using -d flag.\"\n },\n {\n \"code\": null,\n \"e\": 21464,\n \"s\": 21398,\n \"text\": \"Using this flag, cURL writes the output of the request to a file.\"\n },\n {\n \"code\": null,\n \"e\": 21472,\n \"s\": 21464,\n \"text\": \"Example\"\n },\n {\n \"code\": null,\n \"e\": 21536,\n \"s\": 21472,\n \"text\": \"The following example shows the use of -o flag of cURL utility.\"\n },\n {\n \"code\": null,\n \"e\": 21762,\n \"s\": 21536,\n \"text\": \"$ curl -o example.html www.tutorialspoint.com/index.htm \\n% Total % Received % Xferd Average Speed Time Time Time Current \\n Dload Upload Total Spent Left Speed\\n100 81193 0 81193 0 0 48168 0 --:--:-- 0:00:01 --:--:--\\n58077\"\n },\n {\n \"code\": null,\n \"e\": 21913,\n \"s\": 21762,\n \"text\": \"This gets the source code of the homepage of tutorialspoint.com, creates a file named example.com and saves the output in the file named example.html.\"\n },\n {\n \"code\": null,\n \"e\": 21960,\n \"s\": 21913,\n \"text\": \"Following is the snapshot of the example.html.\"\n },\n {\n \"code\": null,\n \"e\": 22154,\n \"s\": 21960,\n \"text\": \"This flag is similar to –o, the only difference is with this flag, a new file with the same name as the requested url was created, and the source code of the requested url will be copied to it.\"\n },\n {\n \"code\": null,\n \"e\": 22162,\n \"s\": 22154,\n \"text\": \"Example\"\n },\n {\n \"code\": null,\n \"e\": 22226,\n \"s\": 22162,\n \"text\": \"The following example shows the use of -O flag of cURL utility.\"\n },\n {\n \"code\": null,\n \"e\": 22437,\n \"s\": 22226,\n \"text\": \"$ curl -O www.tutorialspoint.com/index.htm\\n% Total % Received % Xferd Average Speed Time Time Time Current\\n Dload Upload Total Spent Left\\nSpeed\\n100 81285 0 81285 0 0 49794 0 --:--:-- 0:00:01 --:--:--\\n60077\"\n },\n {\n \"code\": null,\n \"e\": 22556,\n \"s\": 22437,\n \"text\": \"It creates a new file with the name index.htm and saves the source code of the index 
page of tutorialspoint.com in it.\"\n },\n {\n \"code\": null,\n \"e\": 22848,\n \"s\": 22556,\n \"text\": \"You can access the homepage of the CouchDB by sending a GET request to the CouchDB instance installed. First of all make sure you have installed CouchDB in your Linux environment and it is running successfully, and then use the following syntax to send a get request to the CouchDB instance.\"\n },\n {\n \"code\": null,\n \"e\": 22876,\n \"s\": 22848,\n \"text\": \"curl http://127.0.0.1:5984/\"\n },\n {\n \"code\": null,\n \"e\": 23031,\n \"s\": 22876,\n \"text\": \"This gives you a JSON document as shown below where CouchDB specifies the details such as version number, name of the vendor, and version of the software.\"\n },\n {\n \"code\": null,\n \"e\": 23256,\n \"s\": 23031,\n \"text\": \"$ curl http://127.0.0.1:5984/\\n{\\n \\\"couchdb\\\" : \\\"Welcome\\\",\\n \\\"uuid\\\" : \\\"8f0d59acd0e179f5e9f0075fa1f5e804\\\",\\n \\\"version\\\" : \\\"1.6.1\\\",\\n \\\"vendor\\\" : {\\n \\\"name\\\":\\\"The Apache Software Foundation\\\",\\n \\\"version\\\":\\\"1.6.1\\\"\\n }\\n}\\n\"\n },\n {\n \"code\": null,\n \"e\": 23443,\n \"s\": 23256,\n \"text\": \"You can get the list of all the databases created, by sending a get request along with the string \\\"_all_dbs string \\\". Following is the syntax to get the list of all databases in CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 23486,\n \"s\": 23443,\n \"text\": \"curl -X GET http://127.0.0.1:5984/_all_dbs\"\n },\n {\n \"code\": null,\n \"e\": 23552,\n \"s\": 23486,\n \"text\": \"It gives you the list of all databases in CouchDB as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 23626,\n \"s\": 23552,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/_all_dbs\\n[ \\\"_replicator\\\" , \\\"_users\\\" ]\"\n },\n {\n \"code\": null,\n \"e\": 23719,\n \"s\": 23626,\n \"text\": \"You can create a database in CouchDB using cURL with PUT header using the following syntax −\"\n },\n {\n \"code\": null,\n \"e\": 23770,\n \"s\": 23719,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/database_name\\n\"\n },\n {\n \"code\": null,\n \"e\": 23870,\n \"s\": 23770,\n \"text\": \"As an example, using the above given syntax create a database with name my_database as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 23930,\n \"s\": 23870,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/my_database\\n{\\\"ok\\\":true}\"\n },\n {\n \"code\": null,\n \"e\": 24102,\n \"s\": 23930,\n \"text\": \"Verify whether the database is created, by listing out all the databases as shown\\nbelow. Here you can observe the name of newly created database, \\\"my_database\\\" in the list\"\n },\n {\n \"code\": null,\n \"e\": 24194,\n \"s\": 24102,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/_all_dbs\\n\\n[ \\\"_replicator \\\" , \\\"_users\\\" , \\\"my_database\\\" ]\"\n },\n {\n \"code\": null,\n \"e\": 24346,\n \"s\": 24194,\n \"text\": \"You can get the information about database using the get request along with the database name. Following is the syntax to get the database information.\"\n },\n {\n \"code\": null,\n \"e\": 24505,\n \"s\": 24346,\n \"text\": \"As an example let us get the information of the database named my_database as shown below. 
Here you can get the information about your database as a response.\"\n },\n {\n \"code\": null,\n \"e\": 24852,\n \"s\": 24505,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database\\n\\n{\\n \\\"db_name\\\" : \\\"my_database\\\",\\n \\\"doc_count\\\" : 0,\\n \\\"doc_del_count\\\" : 0,\\n \\\"update_seq\\\" : 0,\\n \\\"purge_seq\\\" : 0,\\n \\\"compact_running\\\" : false,\\n \\\"disk_size\\\" : 79,\\n \\\"data_size\\\" : 0,\\n \\\"instance_start_time\\\" : \\\"1423628520835029\\\",\\n \\\"disk_format_version\\\" : 6,\\n \\\"committed_update_seq\\\" : 0\\n }\"\n },\n {\n \"code\": null,\n \"e\": 25126,\n \"s\": 24852,\n \"text\": \"Futon is the built-in, web based, administration interface of CouchDB. It provides\\na simple graphical interface using which you can interact with CouchDB. It is a naive interface and it provides full access to all CouchDB features. Following is the list of those features −\"\n },\n {\n \"code\": null,\n \"e\": 25145,\n \"s\": 25126,\n \"text\": \"Creates databases.\"\n },\n {\n \"code\": null,\n \"e\": 25165,\n \"s\": 25145,\n \"text\": \"Destroys databases.\"\n },\n {\n \"code\": null,\n \"e\": 25184,\n \"s\": 25165,\n \"text\": \"Creates documents.\"\n },\n {\n \"code\": null,\n \"e\": 25203,\n \"s\": 25184,\n \"text\": \"Updates documents.\"\n },\n {\n \"code\": null,\n \"e\": 25220,\n \"s\": 25203,\n \"text\": \"Edits documents.\"\n },\n {\n \"code\": null,\n \"e\": 25239,\n \"s\": 25220,\n \"text\": \"Deletes documents.\"\n },\n {\n \"code\": null,\n \"e\": 25313,\n \"s\": 25239,\n \"text\": \"Make sure CouchDB is running and then open the following url in browser −\"\n },\n {\n \"code\": null,\n \"e\": 25343,\n \"s\": 25313,\n \"text\": \"http://127.0.0.1:5984/_utils/\"\n },\n {\n \"code\": null,\n \"e\": 25414,\n \"s\": 25343,\n \"text\": \"If you open this url, it displays the Futon home page as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 25634,\n \"s\": 25414,\n \"text\": \"On the left hand side of this page you can observe the list of all the current databases of CouchDB. In this illustration, we have a database named my_database, along with system defined databases _replicator and _user.\"\n },\n {\n \"code\": null,\n \"e\": 25854,\n \"s\": 25634,\n \"text\": \"On the left hand side of this page you can observe the list of all the current databases of CouchDB. 
In this illustration, we have a database named my_database, along with system defined databases _replicator and _user.\"\n },\n {\n \"code\": null,\n \"e\": 26349,\n \"s\": 25854,\n \"text\": \"On the right hand side you can see the following −\\n\\nTools − In this section you can find Configuration to configure CouchDB, Replicator to perform replications, and Status to verify status of CouchDB and recent modifications done on CouchDB.\\nDocumentation − This section contains the complete documentation for the recent version of CouchDB.\\nDiagnostics − Under this you can verify the installation of CouchDB.\\nRecent Databases − Under this you can find the names of recently added databases.\\n\\n\"\n },\n {\n \"code\": null,\n \"e\": 26400,\n \"s\": 26349,\n \"text\": \"On the right hand side you can see the following −\"\n },\n {\n \"code\": null,\n \"e\": 26590,\n \"s\": 26400,\n \"text\": \"Tools − In this section you can find Configuration to configure CouchDB, Replicator to perform replications, and Status to verify status of CouchDB and recent modifications done on CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 26780,\n \"s\": 26590,\n \"text\": \"Tools − In this section you can find Configuration to configure CouchDB, Replicator to perform replications, and Status to verify status of CouchDB and recent modifications done on CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 26880,\n \"s\": 26780,\n \"text\": \"Documentation − This section contains the complete documentation for the recent version of CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 26980,\n \"s\": 26880,\n \"text\": \"Documentation − This section contains the complete documentation for the recent version of CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 27049,\n \"s\": 26980,\n \"text\": \"Diagnostics − Under this you can verify the installation of CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 27118,\n \"s\": 27049,\n \"text\": \"Diagnostics − Under this you can verify the installation of CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 27200,\n \"s\": 27118,\n \"text\": \"Recent Databases − Under this you can find the names of recently added databases.\"\n },\n {\n \"code\": null,\n \"e\": 27282,\n \"s\": 27200,\n \"text\": \"Recent Databases − Under this you can find the names of recently added databases.\"\n },\n {\n \"code\": null,\n \"e\": 27533,\n \"s\": 27282,\n \"text\": \"Using HTTP request headers, you can communicate with CouchDB. Through these requests we can retrieve data from the database, store data in to the database in the form of documents, and we can view as well as format the documents stored in a database.\"\n },\n {\n \"code\": null,\n \"e\": 27796,\n \"s\": 27533,\n \"text\": \"While communicating with the database we will use different request formats like get, head, post, put, delete, and copy. For all operations in CouchDB, the input data and the output data structures will be in the form of JavaScript Object Notation (JSON) object.\"\n },\n {\n \"code\": null,\n \"e\": 27891,\n \"s\": 27796,\n \"text\": \"Following are the different request formats of HTTP Protocol used to communicate with CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 28178,\n \"s\": 27891,\n \"text\": \"GET − This format is used to get a specific item. To get different items, you have to send specific url patterns. 
In CouchDB using this GET request, we can get static items, database documents and configuration, and statistical information in the form of JSON documents (in most cases).\"\n },\n {\n \"code\": null,\n \"e\": 28465,\n \"s\": 28178,\n \"text\": \"GET − This format is used to get a specific item. To get different items, you have to send specific url patterns. In CouchDB using this GET request, we can get static items, database documents and configuration, and statistical information in the form of JSON documents (in most cases).\"\n },\n {\n \"code\": null,\n \"e\": 28570,\n \"s\": 28465,\n \"text\": \"HEAD − The HEAD method is used to get the HTTP header of a GET request without the body of the response.\"\n },\n {\n \"code\": null,\n \"e\": 28675,\n \"s\": 28570,\n \"text\": \"HEAD − The HEAD method is used to get the HTTP header of a GET request without the body of the response.\"\n },\n {\n \"code\": null,\n \"e\": 28861,\n \"s\": 28675,\n \"text\": \"POST − Post request is used to upload data. In CouchDB using POST request, you can set values, upload documents, set document values, and can also start certain administration commands.\"\n },\n {\n \"code\": null,\n \"e\": 29047,\n \"s\": 28861,\n \"text\": \"POST − Post request is used to upload data. In CouchDB using POST request, you can set values, upload documents, set document values, and can also start certain administration commands.\"\n },\n {\n \"code\": null,\n \"e\": 29150,\n \"s\": 29047,\n \"text\": \"PUT − Using PUT request, you can create new objects, databases, documents, views and design documents.\"\n },\n {\n \"code\": null,\n \"e\": 29253,\n \"s\": 29150,\n \"text\": \"PUT − Using PUT request, you can create new objects, databases, documents, views and design documents.\"\n },\n {\n \"code\": null,\n \"e\": 29339,\n \"s\": 29253,\n \"text\": \"DELETE − Using DELETE request, you can delete documents, views, and design documents.\"\n },\n {\n \"code\": null,\n \"e\": 29425,\n \"s\": 29339,\n \"text\": \"DELETE − Using DELETE request, you can delete documents, views, and design documents.\"\n },\n {\n \"code\": null,\n \"e\": 29487,\n \"s\": 29425,\n \"text\": \"COPY − Using COPY method, you can copy documents and objects.\"\n },\n {\n \"code\": null,\n \"e\": 29549,\n \"s\": 29487,\n \"text\": \"COPY − Using COPY method, you can copy documents and objects.\"\n },\n {\n \"code\": null,\n \"e\": 29776,\n \"s\": 29549,\n \"text\": \"HTTP headers should be supplied to get the right format and encoding. While sending the request to the CouchDB server, you can send Http request headers along with the request. Following are the different Http request headers.\"\n },\n {\n \"code\": null,\n \"e\": 30067,\n \"s\": 29776,\n \"text\": \"Content-type − This Header is used to specify the content type of the data that we supply to the server along with the request. Mostly the type of the content we send along with the request will be MIME type or JSON (application/json). Using Content-type on a request is highly recommended.\"\n },\n {\n \"code\": null,\n \"e\": 30358,\n \"s\": 30067,\n \"text\": \"Content-type − This Header is used to specify the content type of the data that we supply to the server along with the request. Mostly the type of the content we send along with the request will be MIME type or JSON (application/json). 
Using Content-type on a request is highly recommended.\"\n },\n {\n \"code\": null,\n \"e\": 30775,\n \"s\": 30358,\n \"text\": \"Accept − This header is used to specify the server, the list of data types that client can understand, so that the server will send its response using those data types. Generally here, you can send the list of MIME data types the client accepts, separated by colons.\\nThough, using Accept in queries of CouchDB is not required, it is highly recommended to ensure that the data returned can be processed by the client.\"\n },\n {\n \"code\": null,\n \"e\": 31042,\n \"s\": 30775,\n \"text\": \"Accept − This header is used to specify the server, the list of data types that client can understand, so that the server will send its response using those data types. Generally here, you can send the list of MIME data types the client accepts, separated by colons.\"\n },\n {\n \"code\": null,\n \"e\": 31192,\n \"s\": 31042,\n \"text\": \"Though, using Accept in queries of CouchDB is not required, it is highly recommended to ensure that the data returned can be processed by the client.\"\n },\n {\n \"code\": null,\n \"e\": 31331,\n \"s\": 31192,\n \"text\": \"These are the headers of the response sent by the server. These headers give information about the content send by the server as response.\"\n },\n {\n \"code\": null,\n \"e\": 31474,\n \"s\": 31331,\n \"text\": \"Content-type − This header specifies the MIME type of the data returned by the server. For most request, the returned MIME type is text/plain.\"\n },\n {\n \"code\": null,\n \"e\": 31617,\n \"s\": 31474,\n \"text\": \"Content-type − This header specifies the MIME type of the data returned by the server. For most request, the returned MIME type is text/plain.\"\n },\n {\n \"code\": null,\n \"e\": 31832,\n \"s\": 31617,\n \"text\": \"Cache-control − This header suggests the client about treating the information sent by the server. CouchDB mostly returns the must-revalidate, which indicates that the information should be revalidated if possible.\"\n },\n {\n \"code\": null,\n \"e\": 32047,\n \"s\": 31832,\n \"text\": \"Cache-control − This header suggests the client about treating the information sent by the server. 
CouchDB mostly returns the must-revalidate, which indicates that the information should be revalidated if possible.\"\n },\n {\n \"code\": null,\n \"e\": 32140,\n \"s\": 32047,\n \"text\": \"Content-length − This header returns the length of the content sent by the server, in bytes.\"\n },\n {\n \"code\": null,\n \"e\": 32233,\n \"s\": 32140,\n \"text\": \"Content-length − This header returns the length of the content sent by the server, in bytes.\"\n },\n {\n \"code\": null,\n \"e\": 32308,\n \"s\": 32233,\n \"text\": \"Etag − This header is used to show the revision for a document, or a view.\"\n },\n {\n \"code\": null,\n \"e\": 32383,\n \"s\": 32308,\n \"text\": \"Etag − This header is used to show the revision for a document, or a view.\"\n },\n {\n \"code\": null,\n \"e\": 32483,\n \"s\": 32383,\n \"text\": \"Following is the tabular form of the status code sent by the http header and the description of it.\"\n },\n {\n \"code\": null,\n \"e\": 32492,\n \"s\": 32483,\n \"text\": \"200 − OK\"\n },\n {\n \"code\": null,\n \"e\": 32558,\n \"s\": 32492,\n \"text\": \"This status will be issued when a request completed successfully.\"\n },\n {\n \"code\": null,\n \"e\": 32572,\n \"s\": 32558,\n \"text\": \"201 − Created\"\n },\n {\n \"code\": null,\n \"e\": 32627,\n \"s\": 32572,\n \"text\": \"This status will be issued when a document is created.\"\n },\n {\n \"code\": null,\n \"e\": 32642,\n \"s\": 32627,\n \"text\": \"202 − Accepted\"\n },\n {\n \"code\": null,\n \"e\": 32697,\n \"s\": 32642,\n \"text\": \"This status will be issued when a request is accepted.\"\n },\n {\n \"code\": null,\n \"e\": 32713,\n \"s\": 32697,\n \"text\": \"404 − Not Found\"\n },\n {\n \"code\": null,\n \"e\": 32797,\n \"s\": 32713,\n \"text\": \"This status will be issued when the server is unable to find the requested content.\"\n },\n {\n \"code\": null,\n \"e\": 32824,\n \"s\": 32797,\n \"text\": \"405 − Resource Not Allowed\"\n },\n {\n \"code\": null,\n \"e\": 32890,\n \"s\": 32824,\n \"text\": \"This status is issued when the HTTP request type used is invalid.\"\n },\n {\n \"code\": null,\n \"e\": 32905,\n \"s\": 32890,\n \"text\": \"409 − Conflict\"\n },\n {\n \"code\": null,\n \"e\": 32966,\n \"s\": 32905,\n \"text\": \"This status is issued whenever there is any update conflict.\"\n },\n {\n \"code\": null,\n \"e\": 32989,\n \"s\": 32966,\n \"text\": \"415 − Bad Content Type\"\n },\n {\n \"code\": null,\n \"e\": 33075,\n \"s\": 32989,\n \"text\": \"This status indicated that the requested content type is not supported by the server.\"\n },\n {\n \"code\": null,\n \"e\": 33103,\n \"s\": 33075,\n \"text\": \"500 − Internal Server Error\"\n },\n {\n \"code\": null,\n \"e\": 33175,\n \"s\": 33103,\n \"text\": \"This status is issued whenever the data sent in the request is invalid.\"\n },\n {\n \"code\": null,\n \"e\": 33312,\n \"s\": 33175,\n \"text\": \"There are certain url paths using which, you can interact with the database directly. 
Following is the tabular format of such url paths.\"\n },\n {\n \"code\": null,\n \"e\": 33320,\n \"s\": 33312,\n \"text\": \"PUT /db\"\n },\n {\n \"code\": null,\n \"e\": 33363,\n \"s\": 33320,\n \"text\": \"This url is used to create a new database.\"\n },\n {\n \"code\": null,\n \"e\": 33371,\n \"s\": 33363,\n \"text\": \"GET /db\"\n },\n {\n \"code\": null,\n \"e\": 33440,\n \"s\": 33371,\n \"text\": \"This url is used to get the information about the existing database.\"\n },\n {\n \"code\": null,\n \"e\": 33457,\n \"s\": 33440,\n \"text\": \"PUT /db/document\"\n },\n {\n \"code\": null,\n \"e\": 33524,\n \"s\": 33457,\n \"text\": \"This url is used to create a document/update an existing document.\"\n },\n {\n \"code\": null,\n \"e\": 33541,\n \"s\": 33524,\n \"text\": \"GET /db/document\"\n },\n {\n \"code\": null,\n \"e\": 33579,\n \"s\": 33541,\n \"text\": \"This url is used to get the document.\"\n },\n {\n \"code\": null,\n \"e\": 33599,\n \"s\": 33579,\n \"text\": \"DELETE /db/document\"\n },\n {\n \"code\": null,\n \"e\": 33678,\n \"s\": 33599,\n \"text\": \"This url is used to delete the specified document from the specified database.\"\n },\n {\n \"code\": null,\n \"e\": 33705,\n \"s\": 33678,\n \"text\": \"GET /db/_design/design-doc\"\n },\n {\n \"code\": null,\n \"e\": 33766,\n \"s\": 33705,\n \"text\": \"This url is used to get the definition of a design document.\"\n },\n {\n \"code\": null,\n \"e\": 33808,\n \"s\": 33766,\n \"text\": \"GET /db/_design/designdoc/_view/view-name\"\n },\n {\n \"code\": null,\n \"e\": 33909,\n \"s\": 33808,\n \"text\": \"This url is used to access the view, view-name from the design document from the specified database.\"\n },\n {\n \"code\": null,\n \"e\": 34112,\n \"s\": 33909,\n \"text\": \"Database is the outermost data structure in CouchDB where your documents are stored. You can create these databases using cURL utility provided by CouchDB, as well as Futon the web interface of CouchDB.\"\n },\n {\n \"code\": null,\n \"e\": 34276,\n \"s\": 34112,\n \"text\": \"You can create a database in CouchDB by sending an HTTP request to the server using PUT method through cURL utility. Following is the syntax to create a database −\"\n },\n {\n \"code\": null,\n \"e\": 34327,\n \"s\": 34276,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/database name\\n\"\n },\n {\n \"code\": null,\n \"e\": 34649,\n \"s\": 34327,\n \"text\": \"Using −X we can specify HTTP custom request method to be used. In this case, we are using PUT method. When we use the PUT operation/method, the content of the url specifies the object name we are creating using HTTP request. Here we have to send the name of the database using put request in the url to create a database.\"\n },\n {\n \"code\": null,\n \"e\": 34763,\n \"s\": 34649,\n \"text\": \"Using the above given syntax if you want to create a database with name my_database, you can create it as follows\"\n },\n {\n \"code\": null,\n \"e\": 34827,\n \"s\": 34763,\n \"text\": \"curl -X PUT http://127.0.0.1:5984/my_database\\n{\\n \\\"ok\\\":true\\n}\\n\"\n },\n {\n \"code\": null,\n \"e\": 34950,\n \"s\": 34827,\n \"text\": \"As a response the server will return you a JSON document with content “ok” − true indicating the operation was successful.\"\n },\n {\n \"code\": null,\n \"e\": 35127,\n \"s\": 34950,\n \"text\": \"Verify whether the database is created, by listing out all the databases as shown below. 
Here you can observe the name of a newly created database, \\\" my_database \\\" in the list.\"\n },\n {\n \"code\": null,\n \"e\": 35223,\n \"s\": 35127,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/_all_dbs\\n\\n[ \\\"_replicator \\\" , \\\" _users \\\" , \\\" my_database \\\" ]\"\n },\n {\n \"code\": null,\n \"e\": 35347,\n \"s\": 35223,\n \"text\": \"To create a database open the http://127.0.0.1:5984/_utils/. You will get\\nan Overview/index page of CouchDB as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 35463,\n \"s\": 35347,\n \"text\": \"In this page, you can see the list of databases in CouchDB, an option button Create Database on the left hand side.\"\n },\n {\n \"code\": null,\n \"e\": 35781,\n \"s\": 35463,\n \"text\": \"Now click on the create database link. You can see a popup window Create New Databases asking for the database name for the new database. Choose any name following the mentioned criteria. Here we are creating another database with name tutorials_point. Click on the create button as shown in the following screenshot.\"\n },\n {\n \"code\": null,\n \"e\": 35942,\n \"s\": 35781,\n \"text\": \"You can delete a database in CouchDB by sending a request to the server using DELETE method through cURL utility. Following is the syntax to create a database −\"\n },\n {\n \"code\": null,\n \"e\": 35996,\n \"s\": 35942,\n \"text\": \"$ curl -X DELETE http://127.0.0.1:5984/database name\\n\"\n },\n {\n \"code\": null,\n \"e\": 36229,\n \"s\": 35996,\n \"text\": \"Using −X we can specify a custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using the DELETE method. Send the url to the server by specifying the database to be deleted in it.\"\n },\n {\n \"code\": null,\n \"e\": 36369,\n \"s\": 36229,\n \"text\": \"Assume there is a database named my_database2 in CouchDB. Using the above given syntax if you want to delete it, you can do it as follows −\"\n },\n {\n \"code\": null,\n \"e\": 36440,\n \"s\": 36369,\n \"text\": \"$ curl -X DELETE http://127.0.0.1:5984/my_database2\\n{\\n \\\"ok\\\" : true\\n}\"\n },\n {\n \"code\": null,\n \"e\": 36564,\n \"s\": 36440,\n \"text\": \"As a response, the server will return you a JSON document with content “ok” − true indicating the operation was successful.\"\n },\n {\n \"code\": null,\n \"e\": 36747,\n \"s\": 36564,\n \"text\": \"Verify whether the database is deleted by listing out all the databases as shown below. Here you can observe the name of the deleted database, \\\"my_database\\\" is not there in the list.\"\n },\n {\n \"code\": null,\n \"e\": 36825,\n \"s\": 36747,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/_all_dbs\\n\\n[ \\\"_replicator \\\" , \\\" _users \\\" ]\"\n },\n {\n \"code\": null,\n \"e\": 36959,\n \"s\": 36825,\n \"text\": \"To delete a database, open the http://127.0.0.1:5984/_utils/ url where you will get an Overview/index page of CouchDB as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 37294,\n \"s\": 36959,\n \"text\": \"Here you can see three user created databases. Let us delete the database named tutorials_point2. To delete a database, select one from the list of databases, and click on it, which will lead to the overview page of the selected database where you can see the various operations on databases. The following screenshot shows the same −\"\n },\n {\n \"code\": null,\n \"e\": 37468,\n \"s\": 37294,\n \"text\": \"Among them you can find Delete Database option. 
By clicking on it you will get a popup window, asking whether you are sure! Click on delete, to delete the selected database.\"\n },\n {\n \"code\": null,\n \"e\": 37754,\n \"s\": 37468,\n \"text\": \"Documents are CouchDB’s central data structure. Contents of the database will be stored in the form of Documents instead of tables. You can create these documents using cURL utility provided by CouchDB, as well as Futon. This chapter covers the ways to create a document in a database.\"\n },\n {\n \"code\": null,\n \"e\": 38034,\n \"s\": 37754,\n \"text\": \"Each document in CouchDB has a unique ID. You can choose your own ID that should be in the form of a string. Generally, UUID (Universally Unique IDentifier) is used, which are random numbers that have least chance of creating a duplicate. These are preferred to avoid collisions.\"\n },\n {\n \"code\": null,\n \"e\": 38197,\n \"s\": 38034,\n \"text\": \"You can create a document in CouchDB by sending an HTTP request to the server using PUT method through cURL utility. Following is the syntax to create a document.\"\n },\n {\n \"code\": null,\n \"e\": 38271,\n \"s\": 38197,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/database name/\\\"id\\\" -d ' { document} '\"\n },\n {\n \"code\": null,\n \"e\": 38577,\n \"s\": 38271,\n \"text\": \"Using −X, we can specify a custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using PUT method. When we use the PUT method, the content of the url specifies the object name we are creating using the HTTP request. Here we have to send the following −\"\n },\n {\n \"code\": null,\n \"e\": 38646,\n \"s\": 38577,\n \"text\": \"The name of the database name in which we are creating the document.\"\n },\n {\n \"code\": null,\n \"e\": 38715,\n \"s\": 38646,\n \"text\": \"The name of the database name in which we are creating the document.\"\n },\n {\n \"code\": null,\n \"e\": 38732,\n \"s\": 38715,\n \"text\": \"The document id.\"\n },\n {\n \"code\": null,\n \"e\": 38749,\n \"s\": 38732,\n \"text\": \"The document id.\"\n },\n {\n \"code\": null,\n \"e\": 38962,\n \"s\": 38749,\n \"text\": \"The data of the document. −d option is used to send the data/document through HTTP request. While writing a document simply enter your Field-Value pairs separated by colon, within flower brackets as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 39175,\n \"s\": 38962,\n \"text\": \"The data of the document. −d option is used to send the data/document through HTTP request. 
While writing a document simply enter your Field-Value pairs separated by colon, within flower brackets as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 39232,\n \"s\": 39175,\n \"text\": \"{\\n Name : Raju\\n age : 23\\n Designation : Designer\\n}\"\n },\n {\n \"code\": null,\n \"e\": 39377,\n \"s\": 39232,\n \"text\": \"Using the above given syntax if you want to create a document with id 001 in a database with name my_database, you can create it as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 39578,\n \"s\": 39377,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/my_database/\\\"001\\\" -d\\n'{ \\\" Name \\\" : \\\" Raju \\\" , \\\" age \\\" :\\\" 23 \\\" , \\\" Designation \\\" : \\\" Designer \\\" }'\\n\\n{\\\"ok\\\":true,\\\"id\\\":\\\"001\\\",\\\"rev\\\":\\\"1-1c2fae390fa5475d9b809301bbf3f25e\\\"}\"\n },\n {\n \"code\": null,\n \"e\": 39642,\n \"s\": 39578,\n \"text\": \"The response of CouchDB to this request contains three fields −\"\n },\n {\n \"code\": null,\n \"e\": 39689,\n \"s\": 39642,\n \"text\": \"\\\"ok\\\", indicating the operation was successful.\"\n },\n {\n \"code\": null,\n \"e\": 39736,\n \"s\": 39689,\n \"text\": \"\\\"ok\\\", indicating the operation was successful.\"\n },\n {\n \"code\": null,\n \"e\": 39782,\n \"s\": 39736,\n \"text\": \"\\\"id\\\", which stores the id of the document and\"\n },\n {\n \"code\": null,\n \"e\": 39828,\n \"s\": 39782,\n \"text\": \"\\\"id\\\", which stores the id of the document and\"\n },\n {\n \"code\": null,\n \"e\": 40205,\n \"s\": 39828,\n \"text\": \"\\\"rev\\\", this indicates the revision id. Every time you revise (update or modify) a document a _rev value will be generated by CouchDB. If you want to update or delete a document, CouchDB expects you to include the _rev field of the revision you wish to change. When CouchDB accepts the change, it will generate a new revision number. This mechanism ensures concurrency control.\"\n },\n {\n \"code\": null,\n \"e\": 40582,\n \"s\": 40205,\n \"text\": \"\\\"rev\\\", this indicates the revision id. Every time you revise (update or modify) a document a _rev value will be generated by CouchDB. If you want to update or delete a document, CouchDB expects you to include the _rev field of the revision you wish to change. When CouchDB accepts the change, it will generate a new revision number. This mechanism ensures concurrency control.\"\n },\n {\n \"code\": null,\n \"e\": 40673,\n \"s\": 40582,\n \"text\": \"If you want to view the created document you can get it using the document as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 40857,\n \"s\": 40673,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database/001\\n{\\n \\\"_id\\\": \\\"001\\\",\\n \\\"_rev\\\": \\\"1-3fcc78daac7a90803f0a5e383f4f1e1e\\\",\\n \\\"Name\\\": \\\"Raju\\\",\\n \\\"age\\\": 23,\\n \\\"Designation\\\": \\\"Designer\\\"\\n}\"\n },\n {\n \"code\": null,\n \"e\": 40978,\n \"s\": 40857,\n \"text\": \"To Create a document open the http://127.0.0.1:5984/_utils/ url to get an Overview/index page of CouchDB as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 41126,\n \"s\": 40978,\n \"text\": \"Select the database in which you want to create the document. Open the Overview page of the database and select New Document option as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 41397,\n \"s\": 41126,\n \"text\": \"When you select the New Document option, CouchDB creates a new database document, assigning it a new id. 
You can edit the value of the id and can assign your own value in the form of a string. In the following illustration, we have created a new document with an id 001.\"\n },\n {\n \"code\": null,\n \"e\": 41491,\n \"s\": 41397,\n \"text\": \"In this page, you can observe three options − save Document, Add Field and Upload Attachment.\"\n },\n {\n \"code\": null,\n \"e\": 41844,\n \"s\": 41491,\n \"text\": \"To add field to the document click on Add Field option. After creating a database, you can add a field to it using this option. Clicking on it will get you a pair of text boxes, namely, Field, value. You can edit these values by clicking on them. Edit those values and type your desired Field-Value pair. Click on the green button to save these values.\"\n },\n {\n \"code\": null,\n \"e\": 41948,\n \"s\": 41844,\n \"text\": \"In the following illustration, we have created three fields Name, age and, Designation of the employee.\"\n },\n {\n \"code\": null,\n \"e\": 42084,\n \"s\": 41948,\n \"text\": \"You can save the changes made to the document by clicking on this option. After saving, a new id _rev will be generated as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 42247,\n \"s\": 42084,\n \"text\": \"You can update a document in CouchDB by sending an HTTP request to the server using PUT method through cURL utility. Following is the syntax to update a document.\"\n },\n {\n \"code\": null,\n \"e\": 42359,\n \"s\": 42247,\n \"text\": \"curl -X PUT http://127.0.0.1:5984/database_name/document_id/ -d '{ \\\"field\\\" : \\\"value\\\", \\\"_rev\\\" : \\\"revision id\\\" }'\"\n },\n {\n \"code\": null,\n \"e\": 42470,\n \"s\": 42359,\n \"text\": \"Suppose there is a document with id 001 in the database named my_database. You can delete this as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 42648,\n \"s\": 42470,\n \"text\": \"First of all, get the revision id of the document that is to be updated. You can find the _rev of the document in the document itself, therefore get the document as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 42790,\n \"s\": 42648,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database/001\\n{\\n \\\"_id\\\" : \\\"001\\\",\\n \\\"_rev\\\" : \\\"2-04d8eac1680d237ca25b68b36b8899d3 \\\" ,\\n \\\"age\\\" : \\\"23\\\"\\n}\"\n },\n {\n \"code\": null,\n \"e\": 42897,\n \"s\": 42790,\n \"text\": \"Use revision id _rev from the document to update the document. 
Here we are updating the age from 23 to 24.\"\n },\n {\n \"code\": null,\n \"e\": 43119,\n \"s\": 42897,\n \"text\": \"$ curl -X PUT http://127.0.0.1:5984/my_database/001/ -d\\n' { \\\" age \\\" : \\\" 24 \\\" , \\\" _rev \\\" : \\\" 1-1c2fae390fa5475d9b809301bbf3f25e \\\" } '\\n\\n{ \\\" ok \\\" : true , \\\" id \\\" : \\\" 001 \\\" , \\\" rev \\\" : \\\" 2-04d8eac1680d237ca25b68b36b8899d3 \\\" }\"\n },\n {\n \"code\": null,\n \"e\": 43200,\n \"s\": 43119,\n \"text\": \"To verify the document, get the document again using GET request as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 43355,\n \"s\": 43200,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database/001\\n{\\n \\\" _id \\\" : \\\" 001 \\\",\\n \\\" _rev \\\" : \\\" 2-04d8eac1680d237ca25b68b36b8899d3 \\\" ,\\n \\\" age \\\" : \\\" 23 \\\"\\n }\\n\"\n },\n {\n \"code\": null,\n \"e\": 43430,\n \"s\": 43355,\n \"text\": \"Following are some important points to be noted while updating a document.\"\n },\n {\n \"code\": null,\n \"e\": 43511,\n \"s\": 43430,\n \"text\": \"The URL we send in the request containing the database name and the document id.\"\n },\n {\n \"code\": null,\n \"e\": 43592,\n \"s\": 43511,\n \"text\": \"The URL we send in the request containing the database name and the document id.\"\n },\n {\n \"code\": null,\n \"e\": 43815,\n \"s\": 43592,\n \"text\": \"Updating an existing document is same as updating the entire document. You cannot add a field to an existing document. You can only write an entirely new version of the document into the database with the same document ID.\"\n },\n {\n \"code\": null,\n \"e\": 44038,\n \"s\": 43815,\n \"text\": \"Updating an existing document is same as updating the entire document. You cannot add a field to an existing document. You can only write an entirely new version of the document into the database with the same document ID.\"\n },\n {\n \"code\": null,\n \"e\": 44107,\n \"s\": 44038,\n \"text\": \"We have to supply the revision number as a part of the JSON request.\"\n },\n {\n \"code\": null,\n \"e\": 44176,\n \"s\": 44107,\n \"text\": \"We have to supply the revision number as a part of the JSON request.\"\n },\n {\n \"code\": null,\n \"e\": 44395,\n \"s\": 44176,\n \"text\": \"In return JSON contains the success message, the ID of the document being updated, and the new revision information. If you want to update the new version of the document, you have to quote this latest revision number.\"\n },\n {\n \"code\": null,\n \"e\": 44614,\n \"s\": 44395,\n \"text\": \"In return JSON contains the success message, the ID of the document being updated, and the new revision information. If you want to update the new version of the document, you have to quote this latest revision number.\"\n },\n {\n \"code\": null,\n \"e\": 44735,\n \"s\": 44614,\n \"text\": \"To delete a document open the http://127.0.0.1:5984/_utils/ url to get an\\nOverview/index page of CouchDB as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 44950,\n \"s\": 44735,\n \"text\": \"Select the database in which the document to be updated exists and click it. Here we are updating a document in the database named tutorials_point. You will get the list of documents in the database as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 45068,\n \"s\": 44950,\n \"text\": \"Select a document that you want to update and click on it. 
You will get the contents of the documents as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 45224,\n \"s\": 45068,\n \"text\": \"Here, to update the location from Delhi to Hyderabad, click on the text box, edit the field, and click the green button to save the changes as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 45390,\n \"s\": 45224,\n \"text\": \"You can delete a document in CouchDB by sending an HTTP request to the server using DELETE method through cURL utility. Following is the syntax to delete a document.\"\n },\n {\n \"code\": null,\n \"e\": 45466,\n \"s\": 45390,\n \"text\": \"curl -X DELETE http : // 127.0.0.1:5984 / database name/database id?_rev id\"\n },\n {\n \"code\": null,\n \"e\": 45799,\n \"s\": 45466,\n \"text\": \"Using −X, we can specify a custom request method of HTTP we are using, while communicating with the HTTP server. In this case, we are using Delete method. To delete a database /database_name/database_id/ is not enough. You have to pass the recent revision id through the url. To mention attributes of any data structure \\\"?\\\" is used.\"\n },\n {\n \"code\": null,\n \"e\": 45986,\n \"s\": 45799,\n \"text\": \"Suppose there is a document in database named my_database with document id 001. To delete this document, you have to get the rev id of the document. Get the document data as shown below.\"\n },\n {\n \"code\": null,\n \"e\": 46139,\n \"s\": 45986,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database/001\\n{\\n \\\" _id \\\" : \\\" 001 \\\",\\n \\\" _rev \\\" : \\\" 2-04d8eac1680d237ca25b68b36b8899d3 \\\" ,\\n \\\" age \\\" : \\\" 23 \\\"\\n}\"\n },\n {\n \"code\": null,\n \"e\": 46278,\n \"s\": 46139,\n \"text\": \"Now specify the revision id of the document to be deleted, id of the document, and database name the document belongs to, as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 46440,\n \"s\": 46278,\n \"text\": \"$ curl -X DELETE http://127.0.0.1:5984/my_database/001?rev=1-\\n3fcc78daac7a90803f0a5e383f4f1e1e\\n\\n{\\\"ok\\\":true,\\\"id\\\":\\\"001\\\",\\\"rev\\\":\\\"2-3a561d56de1ce3305d693bd15630bf96\\\"}\"\n },\n {\n \"code\": null,\n \"e\": 46630,\n \"s\": 46440,\n \"text\": \"To verify whether the document is deleted, try to fetch the document by using the GET method. Since you are fetching a deleted document, this will give you an error message as shown below −\"\n },\n {\n \"code\": null,\n \"e\": 46724,\n \"s\": 46630,\n \"text\": \"$ curl -X GET http://127.0.0.1:5984/my_database/001\\n{\\\"error\\\":\\\"not_found\\\",\\\"reason\\\":\\\"deleted\\\"}\\n\"\n },\n {\n \"code\": null,\n \"e\": 46841,\n \"s\": 46724,\n \"text\": \"First of all, verify the documents in the database. Following is the snapshot of the database named tutorials_point.\"\n },\n {\n \"code\": null,\n \"e\": 46964,\n \"s\": 46841,\n \"text\": \"Here you can observe, the database consists of three documents. 
To delete any of the documents say 003, do the following −\"\n },\n {\n \"code\": null,\n \"e\": 47083,\n \"s\": 46964,\n \"text\": \"Click on the document, you will get a page showing the contents of selected document in the form of field-value pairs.\"\n },\n {\n \"code\": null,\n \"e\": 47202,\n \"s\": 47083,\n \"text\": \"Click on the document, you will get a page showing the contents of selected document in the form of field-value pairs.\"\n },\n {\n \"code\": null,\n \"e\": 47308,\n \"s\": 47202,\n \"text\": \"This page also contains four options namely Save Document, Add Field, Upload Attachment, Delete Document.\"\n },\n {\n \"code\": null,\n \"e\": 47414,\n \"s\": 47308,\n \"text\": \"This page also contains four options namely Save Document, Add Field, Upload Attachment, Delete Document.\"\n },\n {\n \"code\": null,\n \"e\": 47447,\n \"s\": 47414,\n \"text\": \"Click on Delete Document option.\"\n },\n {\n \"code\": null,\n \"e\": 47480,\n \"s\": 47447,\n \"text\": \"Click on Delete Document option.\"\n },\n {\n \"code\": null,\n \"e\": 47603,\n \"s\": 47480,\n \"text\": \"You will get a dialog box saying \\\"Are you sure you want to delete this document?\\\" Click on delete, to delete the document.\"\n },\n {\n \"code\": null,\n \"e\": 47726,\n \"s\": 47603,\n \"text\": \"You will get a dialog box saying \\\"Are you sure you want to delete this document?\\\" Click on delete, to delete the document.\"\n },\n {\n \"code\": null,\n \"e\": 48021,\n \"s\": 47726,\n \"text\": \"You can attach files to CouchDB just like email. The file contains metadata like name and includes its MIME type, and the number of bytes the attachment contains. To attach files to a document you have to send PUT request to the server. Following is the syntax to attach files to the document −\"\n },\n {\n \"code\": null,\n \"e\": 48177,\n \"s\": 48021,\n \"text\": \"$ curl -vX PUT http://127.0.0.1:5984/database_name/database_id\\n/filename?rev=document rev_id --data-binary @filename -H \\\"Content-Type:\\ntype of the content\\\"\"\n },\n {\n \"code\": null,\n \"e\": 48235,\n \"s\": 48177,\n \"text\": \"The request has various options that are explained below.\"\n },\n {\n \"code\": null,\n \"e\": 48329,\n \"s\": 48235,\n \"text\": \"--data-binary@ − This option tells cURL to read a file’s contents into the HTTP request body.\"\n },\n {\n \"code\": null,\n \"e\": 48423,\n \"s\": 48329,\n \"text\": \"--data-binary@ − This option tells cURL to read a file’s contents into the HTTP request body.\"\n },\n {\n \"code\": null,\n \"e\": 48512,\n \"s\": 48423,\n \"text\": \"-H − This option is used to mention the content type of the file we are going to upload.\"\n },\n {\n \"code\": null,\n \"e\": 48601,\n \"s\": 48512,\n \"text\": \"-H − This option is used to mention the content type of the file we are going to upload.\"\n },\n {\n \"code\": null,\n \"e\": 48845,\n \"s\": 48601,\n \"text\": \"Let us attach a file named boy.jpg, to the document with id 001, in the database named my_database by sending PUT request to CouchDB. 
Before that, you have to fetch the data of the document with id 001 to get its current rev id, as shown below.

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "1-967a00dff5e02add41819138abb3284d"
}

Now using the _rev value, send the PUT request to the CouchDB server as shown below.

$ curl -vX PUT http://127.0.0.1:5984/my_database/001/boy.jpg?rev=1-967a00dff5e02add41819138abb3284d --data-binary @boy.jpg -H "Content-Type: image/jpg"

To verify whether the attachment was uploaded, fetch the document content as shown below −

$ curl -X GET http://127.0.0.1:5984/my_database/001
{
   "_id": "001",
   "_rev": "2-4705a219cdcca7c72aac4f623f5c46a8",
   "_attachments": {
      "boy.jpg": {
         "content_type": "image/jpg",
         "revpos": 2,
         "digest": "md5-9Swz8jvmga5mfBIsmCxCtQ==",
         "length": 91408,
         "stub": true
      }
   }
}

Attachments can also be uploaded from the web interface. Click on the Upload Attachment button; a dialog box will appear where you can choose the file to be uploaded. Select the file and click on the Upload button.

The uploaded file will be displayed under the _attachments field. Later you can see the file by clicking on it.
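The attachment upload can be scripted in the same way. Below is a minimal sketch with HttpURLConnection, assuming boy.jpg sits in the working directory and the document revision is the one fetched above −

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;

public class AttachFileToDocument {
   public static void main(String[] args) throws Exception {
      URL url = new URL("http://127.0.0.1:5984/my_database/001/boy.jpg"
         + "?rev=1-967a00dff5e02add41819138abb3284d");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      conn.setDoOutput(true);
      // Same role as -H "Content-Type: image/jpg" in the cURL command
      conn.setRequestProperty("Content-Type", "image/jpg");
      try (OutputStream out = conn.getOutputStream()) {
         // Equivalent of cURL's --data-binary @boy.jpg
         out.write(Files.readAllBytes(Paths.get("boy.jpg")));
      }
      System.out.println("HTTP " + conn.getResponseCode());
   }
}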

Make Unordered list with Bootstrap

For an unordered list in Bootstrap, you can try to run the following code −

<!DOCTYPE html>
<html>
   <head>
      <title>Bootstrap lists</title>
      <meta name="viewport" content="width=device-width, initial-scale=1">
      <!-- stylesheet and script sources assumed; any Bootstrap 3 build works here -->
      <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
      <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
      <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
   </head>
   <body>
      <h2>Lists</h2>
      <h3>Fruits (Ordered List)</h3>
      <ol>
         <li>Kiwi</li>
         <li>Apple</li>
         <li>Mango</li>
      </ol>
      <h3>Vegetables (UnOrdered List)</h3>
      <ul>
         <li>Tomato</li>
         <li>Brinjal</li>
         <li>Broccoli</li>
      </ul>
   </body>
</html>
\n \n"},"parsed":{"kind":"list like","value":[{"code":null,"e":1135,"s":1062,"text":"For unordered list in Bootstrap, you can try to run the following code −"},{"code":null,"e":1145,"s":1135,"text":"Live Demo"},{"code":null,"e":1946,"s":1145,"text":"\n\n \n Bootstrap lists\n \n \n \n \n \n \n

Lists

\n

Fruits (Ordered List)

\n
    \n
  1. Kiwi
  2. \n
  3. Apple
  4. \n
  5. Mango
  6. \n
\n

Vegetables (UnOrdered List)

\n
    \n
  • Tomato
  • \n
  • Brinjal
  • \n
  • Broccoli
  • \n
\n \n"}],"string":"[\n {\n \"code\": null,\n \"e\": 1135,\n \"s\": 1062,\n \"text\": \"For unordered list in Bootstrap, you can try to run the following code −\"\n },\n {\n \"code\": null,\n \"e\": 1145,\n \"s\": 1135,\n \"text\": \"Live Demo\"\n },\n {\n \"code\": null,\n \"e\": 1946,\n \"s\": 1145,\n \"text\": \"\\n\\n \\n Bootstrap lists\\n \\n \\n \\n \\n \\n \\n

Lists

\\n

Fruits (Ordered List)

\\n
    \\n
  1. Kiwi
  2. \\n
  3. Apple
  4. \\n
  5. Mango
  6. \\n
\\n

Vegetables (UnOrdered List)

\\n
    \\n
  • Tomato
  • \\n
  • Brinjal
  • \\n
  • Broccoli
  • \\n
\\n \\n\"\n }\n]"}}},{"rowIdx":50,"cells":{"title":{"kind":"string","value":"How to populate a Map using a lambda expression in Java?\n"},"text":{"kind":"string","value":"A Map is a collection object that maps keys to values in Java. The data can be stored in key/value pairs and each key is unique. These key/value pairs are also called map entries.\nIn the below example, we can populate a Map using a lambda expression. We have passed Character and Runnable arguments to Map object and pass a lambda expression as the second argument in the put() method of Map class. We need to pass command-line arguments whether the user enters 'h' for Help and 'q' for quit with the help of Scanner class.\nimport java.util.*;\n\npublic class PopulateUsingMapLambdaTest {\n public static void main(String[] args) {\n Map map = new HashMap<>();\n\n map.put('h', () -> System.out.println(\"Type h or q\")); // lambda expression\n map.put('q', () -> System.exit(0)); // lambda expression\n\n while(true) {\n System.out.println(\"Menu\");\n System.out.println(\"h) Help\");\n System.out.println(\"q) Quit\");\n char key = new Scanner(System.in).nextLine().charAt(0);\n if(map.containsKey(key))\n map.get(key).run();\n }\n }\n}\nMenu\nh) Help\nq) Quit\nType h or q :\nq"},"parsed":{"kind":"list like","value":[{"code":null,"e":1242,"s":1062,"text":"A Map is a collection object that maps keys to values in Java. The data can be stored in key/value pairs and each key is unique. These key/value pairs are also called map entries."},{"code":null,"e":1586,"s":1242,"text":"In the below example, we can populate a Map using a lambda expression. We have passed Character and Runnable arguments to Map object and pass a lambda expression as the second argument in the put() method of Map class. We need to pass command-line arguments whether the user enters 'h' for Help and 'q' for quit with the help of Scanner class."},{"code":null,"e":2182,"s":1586,"text":"import java.util.*;\n\npublic class PopulateUsingMapLambdaTest {\n public static void main(String[] args) {\n Map map = new HashMap<>();\n\n map.put('h', () -> System.out.println(\"Type h or q\")); // lambda expression\n map.put('q', () -> System.exit(0)); // lambda expression\n\n while(true) {\n System.out.println(\"Menu\");\n System.out.println(\"h) Help\");\n System.out.println(\"q) Quit\");\n char key = new Scanner(System.in).nextLine().charAt(0);\n if(map.containsKey(key))\n map.get(key).run();\n }\n }\n}"},{"code":null,"e":2219,"s":2182,"text":"Menu\nh) Help\nq) Quit\nType h or q :\nq"}],"string":"[\n {\n \"code\": null,\n \"e\": 1242,\n \"s\": 1062,\n \"text\": \"A Map is a collection object that maps keys to values in Java. The data can be stored in key/value pairs and each key is unique. These key/value pairs are also called map entries.\"\n },\n {\n \"code\": null,\n \"e\": 1586,\n \"s\": 1242,\n \"text\": \"In the below example, we can populate a Map using a lambda expression. We have passed Character and Runnable arguments to Map object and pass a lambda expression as the second argument in the put() method of Map class. 
PyQt5 Label – Getting Blur effect object - GeeksforGeeks

10 May, 2020

In this article we will see how we can get the blur effect object of a label. By default there is no blur effect on a label, although we can create a blur effect and then add it to the label with the help of the setGraphicsEffect method.

In order to do this we have to do the following –

1. Create a label
2. Set geometry to the label
3. Create a QGraphicsBlurEffect object
4. Add this object to the label with the help of the setGraphicsEffect method
5. Get the blur effect object with the help of the graphicsEffect method

Note : This object has the same properties as the original object

Syntax :

# creating a blur effect
blur_effect = QGraphicsBlurEffect()

# adding blur effect to the label
label.setGraphicsEffect(blur_effect)

# getting the blur object
object = label.graphicsEffect()

Below is the implementation

# importing libraries
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys

class Window(QMainWindow):

    def __init__(self):
        super().__init__()

        # setting title
        self.setWindowTitle("Python ")

        # setting geometry
        self.setGeometry(100, 100, 600, 400)

        # calling method
        self.UiComponents()

        # showing all the widgets
        self.show()

    # method for widgets
    def UiComponents(self):

        # creating label
        label = QLabel("Label", self)

        # setting geometry to the label
        label.setGeometry(200, 100, 150, 60)

        # setting alignment to the label
        label.setAlignment(Qt.AlignCenter)

        # setting font
        label.setFont(QFont('Arial', 15))

        # setting style sheet of the label
        label.setStyleSheet("QLabel"
                            "{"
                            "border : 2px solid green;"
                            "background : lightgreen;"
                            "}")

        # creating a blur effect
        self.blur_effect = QGraphicsBlurEffect()

        # adding blur effect to the label
        label.setGraphicsEffect(self.blur_effect)

        # result label
        result = QLabel(self)

        # setting geometry of the result label
        result.setGeometry(200, 200, 300, 30)

        # getting the blur object
        object = label.graphicsEffect()

        # setting text to the result label
        result.setText(str(object))

# create pyqt5 app
App = QApplication(sys.argv)

# create the instance of our Window
window = Window()

# start the app
sys.exit(App.exec())
Spring Security - OAuth2

OAuth 2.0 was developed by the IETF OAuth Working Group and published in October of 2012. It serves as an open authorization protocol for enabling a third party application to get limited access to an HTTP service on behalf of the resource owner. It can do so while not revealing the identity or the long-term credentials of the user. A third-party application can also use it on its own behalf. The working principle of OAuth consists of the delegation of user authentication to a service hosting the user account, and of authorizing the third-party application to access the user's account.

Let us consider an example. Let us say we want to log in to a website "clientsite.com". We can sign in via Facebook, GitHub, Google or Microsoft. We select one of the options given above, and we are redirected to the respective website for login. If login is successful, we are asked if we want to give clientsite.com access to the specific data requested by it. We select our desired option and we are redirected to clientsite.com with an authorization code or an error code, and our login succeeds or fails depending on our action in the third-party resource. This is the basic working principle of OAuth 2.

There are five key actors involved in an OAuth system.
Let's list them out −

User / Resource Owner − The end-user, who is responsible for the authentication and for providing consent to share resources with the client.

User-Agent − The browser used by the User.

Client − The application requesting an access token.

Authorization Server − The server that is used to authenticate the user/client. It issues access tokens and tracks them throughout their lifetime.

Resource Server − The API that provides access to the requested resource. It validates the access tokens and provides authorization.

We will be developing a Spring Boot Application with Spring Security and OAuth 2.0 to illustrate the above. We will be developing a basic application with an in-memory database to store user credentials for now. The application will make it easy for us to understand the workings of OAuth 2.0 with Spring Security.

Let's use the Spring Initializr to create a Maven project in Java 8. Let's start by going to start.spring.io. We generate an application with the following dependencies −

Spring Web
Spring Security
Cloud OAuth2
Spring Boot Devtools

With the above configuration, we click on the Generate button to generate a project. The project will be downloaded in a zip file. We extract the zip to a folder. We can then open the project in an IDE of our choice. I am using Spring Tool Suite here as it is optimized for Spring applications. We can also use Eclipse or IntelliJ IDEA as we wish.

So, we open the project in STS and let the dependencies get downloaded. Then we can see the project structure in our package explorer window. It should resemble the screenshot below.

If we open the pom.xml file we can view the dependencies and other details related to the project. It should look something like this.

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-parent</artifactId>
      <version>2.3.1.RELEASE</version>
      <relativePath/>
   </parent>
   <groupId>com.tutorial</groupId>
   <artifactId>spring.security.oauth2</artifactId>
   <version>0.0.1-SNAPSHOT</version>
   <name>spring.security.oauth2</name>
   <description>Demo project for Spring Boot</description>

   <properties>
      <java.version>1.8</java.version>
      <spring-cloud.version>Hoxton.SR6</spring-cloud.version>
   </properties>

   <dependencies>
      <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-security</artifactId>
      </dependency>
      <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-web</artifactId>
      </dependency>
      <dependency>
         <groupId>org.springframework.cloud</groupId>
         <artifactId>spring-cloud-starter-oauth2</artifactId>
      </dependency>
      <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-devtools</artifactId>
         <scope>runtime</scope>
         <optional>true</optional>
      </dependency>
      <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-test</artifactId>
         <scope>test</scope>
         <exclusions>
            <exclusion>
               <groupId>org.junit.vintage</groupId>
               <artifactId>junit-vintage-engine</artifactId>
            </exclusion>
         </exclusions>
      </dependency>
      <dependency>
         <groupId>org.springframework.security</groupId>
         <artifactId>spring-security-test</artifactId>
         <scope>test</scope>
      </dependency>
   </dependencies>

   <dependencyManagement>
      <dependencies>
         <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-dependencies</artifactId>
            <version>${spring-cloud.version}</version>
            <type>pom</type>
            <scope>import</scope>
         </dependency>
      </dependencies>
   </dependencyManagement>

   <build>
      <plugins>
         <plugin>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-maven-plugin</artifactId>
         </plugin>
      </plugins>
   </build>
</project>

Now, to the base package of our application, i.e., com.tutorial.spring.security.oauth2, let's add a new package named config where we shall add our configuration classes.
Let's create our first configuration class, UserConfig, which extends the WebSecurityConfigurerAdapter class of Spring Security to manage the users of the client application. We annotate the class with the @Configuration annotation to tell Spring that it is a configuration class.

package com.tutorial.spring.security.oauth2.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.crypto.password.NoOpPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.provisioning.InMemoryUserDetailsManager;
import org.springframework.security.provisioning.UserDetailsManager;

@Configuration
public class UserConfig extends WebSecurityConfigurerAdapter {

   @Bean
   public UserDetailsService userDetailsService() {
      UserDetailsManager userDetailsManager = new InMemoryUserDetailsManager();
      UserDetails user = User.withUsername("john")
         .password("12345")
         .authorities("read")
         .build();
      userDetailsManager.createUser(user);
      return userDetailsManager;
   }

   @Bean
   public PasswordEncoder passwordEncoder() {
      return NoOpPasswordEncoder.getInstance();
   }

   @Override
   @Bean
   public AuthenticationManager authenticationManagerBean() throws Exception {
      return super.authenticationManagerBean();
   }
}

We then add a bean of the UserDetailsService to retrieve the user details for authentication and authorization. To put it in the Spring context we annotate it with @Bean. To keep this tutorial simple and easy to understand, we use an InMemoryUserDetailsManager instance. For a real-world application, we can use other implementations like JdbcUserDetailsManager to connect to a database and so on. To be able to create users easily for this example we use the UserDetailsManager interface, which extends the UserDetailsService and has methods like createUser(), updateUser() and so on. Then, we create a user using the builder class. We give him a username, a password and a "read" authority for now. Then, using the createUser() method, we add the newly created user and return the instance of UserDetailsManager, thus putting it in the Spring context.
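As a pointer for that real-world case, the same bean can be backed by a database instead. A minimal sketch, assuming a DataSource populated with Spring Security's standard users and authorities schema is available (this class is not part of the application we are building here) −

import javax.sql.DataSource;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.provisioning.JdbcUserDetailsManager;

@Configuration
public class JdbcUserConfig {

   // Reads users and authorities from Spring Security's default tables
   @Bean
   public UserDetailsService userDetailsService(DataSource dataSource) {
      JdbcUserDetailsManager userDetailsManager = new JdbcUserDetailsManager();
      userDetailsManager.setDataSource(dataSource);
      return userDetailsManager;
   }
}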
To be able to use the UserDetailsService defined by us, it is necessary to provide a PasswordEncoder bean in the Spring context. Again, to keep it simple for now, we use the NoOpPasswordEncoder. The NoOpPasswordEncoder should not be used for real-world production applications, as it is not secure: it does not encode the password at all and is only useful for development, testing, or proof-of-concept scenarios. We should always use one of the highly secure options provided by Spring Security, the most popular of which is the BCryptPasswordEncoder; we will be using it later in our series of tutorials. To put it in the Spring context we annotate the method with @Bean.

We then override the AuthenticationManager bean method of WebSecurityConfigurerAdapter, which returns the authenticationManagerBean, to put the authentication manager into the Spring context.

Now, to add the client configurations, we add a new configuration class named AuthorizationServerConfig, which extends the AuthorizationServerConfigurerAdapter class of Spring Security. The AuthorizationServerConfigurerAdapter class is used to configure the authorization server using the Spring Security OAuth2 module. We annotate this class with @Configuration as well. To add the authorization server functionality to this class we need to add the @EnableAuthorizationServer annotation, so that the application can behave as an authorization server.

package com.tutorial.spring.security.oauth2.config;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer;
import org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter;
import org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer;
import org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer;

@Configuration
@EnableAuthorizationServer
public class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter {

   @Autowired
   private AuthenticationManager authenticationManager;

   @Override
   public void configure(ClientDetailsServiceConfigurer clients) throws Exception {
      clients.inMemory()
         .withClient("oauthclient1")
         .secret("oauthsecret1")
         .scopes("read")
         .authorizedGrantTypes("password");
   }

   @Override
   public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception {
      endpoints.authenticationManager(authenticationManager);
   }
}

For checking OAuth tokens, Spring Security OAuth exposes two endpoints – /oauth/check_token and /oauth/token_key. These endpoints are protected by default behind denyAll(). The tokenKeyAccess() and checkTokenAccess() methods open these endpoints for use.
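The adapter has a third configure() overload for exactly this. A minimal sketch of how these two endpoints are typically opened up; our example does not depend on it −

import org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerSecurityConfigurer;

// placed inside AuthorizationServerConfig, alongside the other overrides
@Override
public void configure(AuthorizationServerSecurityConfigurer security) throws Exception {
   security.tokenKeyAccess("permitAll()")        // opens /oauth/token_key
      .checkTokenAccess("isAuthenticated()");    // opens /oauth/check_token
}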
We autowire the AuthenticationManager bean we configured in the UserConfig class as a dependency here, which we shall be using later.

We then override two of the configure() methods of the AuthorizationServerConfigurerAdapter to provide an in-memory implementation of the client details service. The first method, which takes a ClientDetailsServiceConfigurer as a parameter, as the name suggests, allows us to configure the clients for the authorization server. These clients represent the applications that will be able to use the functionality of this authorization server. Since this is a basic application for learning the implementation of OAuth2, we will keep things simple for now and use an in-memory implementation with the following attributes −

clientId − the id of the client. Required.

secret − the client secret, required for trusted clients.

scope − the limiting scope of the client, in other words, the client's permissions. If left empty or undefined, the client is not limited by any scope.

authorizedGrantTypes − the grant types that the client is authorized to use. The grant type denotes the way by which the client obtains the token from the authorization server. We will be using the "password" grant type, as it is the simplest; later, we shall use another grant type for another use-case.

In the "password" authorization grant type, the user needs to provide his/her username, password and scope to our client application, which then uses those credentials, along with its own credentials, against the authorization server we want the tokens from.

The other configure() method that we overrode takes an AuthorizationServerEndpointsConfigurer as a parameter and is used to attach the AuthenticationManager to the authorization server configuration.

With these basic configurations, our authorization server is ready to use. Let's go ahead, start it and use it. We will be using Postman (https://www.postman.com/downloads/) for making our requests.

When using STS, we can launch our application and start seeing the logs in our console. When the application starts, we can find the OAuth2 endpoints exposed by our application in the console. Of those endpoints, we will be using the following token endpoint for now −

/oauth/token – for obtaining the token.

If we check the Postman snapshot here, we can notice a few things. Let's list them down below.

The URL − Our Spring Boot Application is running at port 8080 of our local machine, so the request is pointed to http://localhost:8080. The next part is /oauth/token, which we know is the endpoint exposed by OAuth for generating the token.

The query params − Since this is a "password" authorization grant type, the user needs to provide his/her username, password and scope to our client application, which then uses those credentials, along with its own credentials, against the authorization server we want the tokens from.

Client Authorization − The OAuth system requires the client to be authorized to be able to provide the token. Hence, under the Authorization header, we provide the client authentication information, namely the username and password that we configured in our application.

Let's take a closer look at the query params and the authorization header −

The query params

Client credentials

If everything is correct, we shall be able to see our generated token in the response along with a 200 OK status.

The response

We can test our server by putting in wrong credentials or no credentials, and we will get back an error saying the request is unauthorized or has bad credentials.

This is our basic OAuth authorization server, which uses the password grant type to generate and provide a token.
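The same token request can also be issued from code instead of Postman. A minimal sketch with the HttpClient introduced in Java 11 (run separately from the Java 8 project), using the user and the client configured above −

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;

public class PasswordGrantTokenRequest {
   public static void main(String[] args) throws Exception {
      // client credentials go into the Basic Authorization header,
      // just as in the Postman request
      String basic = Base64.getEncoder()
         .encodeToString("oauthclient1:oauthsecret1".getBytes());
      HttpRequest request = HttpRequest.newBuilder()
         .uri(URI.create("http://localhost:8080/oauth/token"
            + "?grant_type=password&username=john&password=12345&scope=read"))
         .header("Authorization", "Basic " + basic)
         .POST(HttpRequest.BodyPublishers.noBody())
         .build();
      HttpResponse<String> response = HttpClient.newHttpClient()
         .send(request, HttpResponse.BodyHandlers.ofString());
      System.out.println(response.body()); // contains the access_token
   }
}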
Next, let's implement a more secure, and more common, application of OAuth2 authentication, i.e., the authorization code grant type. We will update our current application for this purpose.

The authorization code grant type is different from the password grant type in the sense that the user doesn't have to share his credentials with the client application. He shares them with the authorization server only, and in return an authorization code is sent to the client, which it uses to authenticate to the server. It is more secure than the password grant type, as user credentials are not shared with the client application and hence the user's information stays safe. The client application doesn't get access to any important user information unless approved by the user.

In a few simple steps, we can set up a basic OAuth server with the authorization code grant type in our application. Let's see how.

package com.tutorial.spring.security.oauth2.config;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer;
import org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter;
import org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer;
import org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer;

@Configuration
@EnableAuthorizationServer
public class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter {

   @Autowired
   private AuthenticationManager authenticationManager;

   @Override
   public void configure(ClientDetailsServiceConfigurer clients) throws Exception {
      clients.inMemory()
         .withClient("oauthclient1")
         .secret("oauthsecret1")
         .scopes("read")
         .authorizedGrantTypes("password")
         .and()
         .withClient("oauthclient2")
         .secret("oauthsecret2")
         .scopes("read")
         .authorizedGrantTypes("authorization_code")
         .redirectUris("http://localhost:9090");
   }

   @Override
   public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception {
      endpoints.authenticationManager(authenticationManager);
   }
}

Let's add a second client, oauthclient2, for this operation, with a new secret and the read scope. Here we have changed the grant type to authorization code for this client. We also added a redirect URI, so that the authorization server can call back the client. So, basically, the redirect URI is the URI of the client.

Now, we have to establish a connection between the user and the authorization server. We have to set up an interface for the authorization server where the user can provide the credentials. We use the formLogin() implementation of Spring Security to achieve that functionality while keeping things simple. We also make sure that all requests are authenticated.
package com.tutorial.spring.security.oauth2.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.crypto.password.NoOpPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.provisioning.InMemoryUserDetailsManager;
import org.springframework.security.provisioning.UserDetailsManager;

@SuppressWarnings("deprecation")
@Configuration
public class UserConfig extends WebSecurityConfigurerAdapter {

   @Bean
   public UserDetailsService userDetailsService() {
      UserDetailsManager userDetailsManager = new InMemoryUserDetailsManager();
      UserDetails user = User.withUsername("john")
         .password("12345")
         .authorities("read")
         .build();
      userDetailsManager.createUser(user);
      return userDetailsManager;
   }

   @Bean
   public PasswordEncoder passwordEncoder() {
      return NoOpPasswordEncoder.getInstance();
   }

   @Override
   @Bean
   public AuthenticationManager authenticationManagerBean() throws Exception {
      return super.authenticationManagerBean();
   }

   @Override
   protected void configure(HttpSecurity http) throws Exception {
      http.formLogin();
      http.authorizeRequests().anyRequest().authenticated();
   }
}

This completes our setup for the authorization code grant type. Now, to test our setup, we launch our application and open a browser at http://localhost:8080/oauth/authorize?response_type=code&client_id=oauthclient2&scope=read. We will be redirected to the default form login page of Spring Security.

Here, the response type code implies that the authorization server will return an access code, which will be used by the client to log in. When we sign in with the user credentials, we will be asked whether we want to grant the permissions asked for by the client, in a screen similar to the one shown below.

If we approve and click Authorize, we shall see that we are redirected to our given redirect URL along with the access code. In our case, we are redirected to http://localhost:9090/?code=7Hibnw, as we specified in the application. We can now use the code as a client in Postman to log in to the authorization server.

As we can see here, we have used the code received from the authorization server in our URL, with the grant_type as authorization_code and the scope as read. We acted as the client and provided the client credentials as configured in our application. When we make this request we get back our access_token, which we can use further.
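That Postman exchange can likewise be reproduced in code. A minimal sketch, again with Java 11's HttpClient, assuming the code received on the redirect was 7Hibnw (an authorization code is single-use, so a fresh one is needed per run) −

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;

public class AuthorizationCodeTokenRequest {
   public static void main(String[] args) throws Exception {
      String basic = Base64.getEncoder()
         .encodeToString("oauthclient2:oauthsecret2".getBytes());
      // the code parameter is the one returned on the redirect URI
      HttpRequest request = HttpRequest.newBuilder()
         .uri(URI.create("http://localhost:8080/oauth/token"
            + "?grant_type=authorization_code&code=7Hibnw&scope=read"))
         .header("Authorization", "Basic " + basic)
         .POST(HttpRequest.BodyPublishers.noBody())
         .build();
      HttpResponse<String> response = HttpClient.newHttpClient()
         .send(request, HttpResponse.BodyHandlers.ofString());
      System.out.println(response.body()); // contains the access_token
   }
}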
We have used two kinds of authorization grant types and seen how we can use them to acquire access tokens for our client application.\n\n 102 Lectures \n 8 hours \n\n Karthikeya T\n\n 39 Lectures \n 5 hours \n\n Chaand Sheikh\n\n 73 Lectures \n 5.5 hours \n\n Senol Atac\n\n 62 Lectures \n 4.5 hours \n\n Senol Atac\n\n 67 Lectures \n 4.5 hours \n\n Senol Atac\n\n 69 Lectures \n 5 hours \n\n Senol Atac\n Print\n Add Notes\n Bookmark this page"},"parsed":{"kind":"list like","value":[{"code":null,"e":1857,"s":1835,"text":"OAuth2.0 Fundamentals"},{"code":null,"e":1899,"s":1857,"text":"OAuth2.0 Getting started(Practical Guide)"},{"code":null,"e":2492,"s":1899,"text":"OAuth 2.0 was developed by IETF OAuth Working Group and published in October of 2012. It serves as an open authorization protocol for enabling a third party application to get limited access to an HTTP service on behalf of the resource owner. It can do so while not revealing the identity or the long-term credentials of the user. A third-party application itself can also use it on its behalf. The working principle of OAuth consists of the delegation of user authentication to a service hosting the user account and authorizing the third-party application access to the account of the user."},{"code":null,"e":3108,"s":2492,"text":"Let us consider an example. Let us say we want to login to a website “clientsite.com”. We can sign in via Facebook, Github, Google or Microsoft. We select any options of the options given above, and we are redirected to the respective website for login. If login is successful, we are asked if we want to give clientsite.com access to the specific data requested by it. We select our desired option and we are redirected to clientsite.com with an authorization code or error code and our login is successful or not depending on our action in the third-party resource. This is the basic working principle of OAuth 2."},{"code":null,"e":3185,"s":3108,"text":"There are five key actors involved in an OAuth system. Let’s list them out −"},{"code":null,"e":3327,"s":3185,"text":"User / Resource Owner − The end-user, who is responsible for the authentication and for providing consent to share resources with the client."},{"code":null,"e":3469,"s":3327,"text":"User / Resource Owner − The end-user, who is responsible for the authentication and for providing consent to share resources with the client."},{"code":null,"e":3512,"s":3469,"text":"User-Agent − The browser used by the User."},{"code":null,"e":3555,"s":3512,"text":"User-Agent − The browser used by the User."},{"code":null,"e":3608,"s":3555,"text":"Client − The application requesting an access token."},{"code":null,"e":3661,"s":3608,"text":"Client − The application requesting an access token."},{"code":null,"e":3808,"s":3661,"text":"Authorization Server − The server that is used to authenticate the user/client. It issues access tokens and tracks them throughout their lifetime."},{"code":null,"e":3955,"s":3808,"text":"Authorization Server − The server that is used to authenticate the user/client. It issues access tokens and tracks them throughout their lifetime."},{"code":null,"e":4088,"s":3955,"text":"Resource Server − The API that provides access to the requested resource. It validates the access tokens and provides authorization."},{"code":null,"e":4221,"s":4088,"text":"Resource Server − The API that provides access to the requested resource. 
It validates the access tokens and provides authorization."},{"code":null,"e":4532,"s":4221,"text":"We will be developing a Spring Boot Application with Spring Security and OAuth 2.0 to illustrate the above. We will be developing a basic application with an in-memory database to store user credentials now. The application will make it easy for us to understand the workings of OAuth 2.0 with Spring Security."},{"code":null,"e":4703,"s":4532,"text":"Let’s use the Spring initializer to create a maven project in Java 8. Let’s start by going to start.spring.io. We generate an application with the following dependencies−"},{"code":null,"e":4714,"s":4703,"text":"Spring Web"},{"code":null,"e":4730,"s":4714,"text":"Spring Security"},{"code":null,"e":4743,"s":4730,"text":"Cloud OAuth2"},{"code":null,"e":4764,"s":4743,"text":"Spring Boot Devtools"},{"code":null,"e":5113,"s":4764,"text":"With the above configuration, we click on the Generate button to generate a project. The project will be downloaded in a zip file. We extract the zip to a folder. We can then open the project in an IDE of our choice. I am using Spring Tools Suite here as it is optimized for spring applications. We can also use Eclipse or IntelliJ Idea as we wish."},{"code":null,"e":5293,"s":5113,"text":"So, we open the project in STS, let the dependencies get downloaded. Then we can see the project structure in our package explorer window. It should resemble the screenshot below."},{"code":null,"e":5428,"s":5293,"text":"If we open the pom.xml file we can view the dependencies and other details related to the project. It should look something like this."},{"code":null,"e":8181,"s":5428,"text":" \n \n 4.0.0 \n \n org.springframework.boot \n spring-boot-starter-parent \n 2.3.1.RELEASE \n \n \n com.tutorial \n spring.security.oauth2 \n 0.0.1-SNAPSHOT \n spring.security.oauth2 \n Demo project for Spring Boot \n \n 1.8 \n Hoxton.SR6 \n \n \n \n org.springframework.boot \n spring-boot-starter-security \n \n \n org.springframework.boot \n spring-boot-starter-web \n \n \n org.springframework.cloud \n spring-cloud-starter-oauth2 \n \n \n org.springframework.boot \n spring-boot-devtools\n runtime \n true \n \n \n org.springframework.boot \n spring-boot-starter-test \n test \n org.junit.vintage \n junit-vintage-engine \n \n \n \n \n org.springframework.security \n spring-security-test \n test \n \n \n \n \n \n org.springframework.cloud \n spring-cloud-dependencies \n ${spring-cloud.version} \n pom \n import \n \n \n \n \n \n org.springframework.boot\n spring-boot-maven-plugin \n \n \n \n"},{"code":null,"e":8352,"s":8181,"text":"Now, to the base package of our application, i.e., com.tutorial.spring.security.oauth2, let’s add a new package named config where we shall add our configuration classes."},{"code":null,"e":8628,"s":8352,"text":"Let’s create our first configuration class, UserConfig which extends the WebSecurityConfigurerAdapter class of Spring Security to manage the users of the client application. 
We annotate the class with @Configuration annotation to tell Spring that it is a configuration class."},{"code":null,"e":10231,"s":8628,"text":"package com.tutorial.spring.security.oauth2.config; \nimport org.springframework.context.annotation.Bean; \nimport org.springframework.context.annotation.Configuration; \nimport org.springframework.security.authentication.AuthenticationManager; \nimport org.springframework.security.config.annotation.web.builders.HttpSecurity; \nimport org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter; \nimport org.springframework.security.core.userdetails.User; \nimport org.springframework.security.core.userdetails.UserDetails;\nimport org.springframework.security.core.userdetails.UserDetailsService; \nimport org.springframework.security.crypto.password.NoOpPasswordEncoder; \nimport org.springframework.security.crypto.password.PasswordEncoder; \nimport org.springframework.security.provisioning.InMemoryUserDetailsManager; \nimport org.springframework.security.provisioning.UserDetailsManager; \n@Configuration public class UserConfig extends WebSecurityConfigurerAdapter { \n @Bean \n public UserDetailsService userDetailsService() {\n UserDetailsManager userDetailsManager = new InMemoryUserDetailsManager(); \n UserDetails user = User.withUsername(\"john\") \n .password(\"12345\") .authorities(\"read\") \n .build(); userDetailsManager.createUser(user); return userDetailsManager; \n } \n @Bean\n public PasswordEncoder passwordEncoder() { \n return NoOpPasswordEncoder.getInstance(); \n } \n @Override \n @Bean \n public AuthenticationManager authenticationManagerBean() throws Exception { \n return super.authenticationManagerBean(); \n } \n}"},{"code":null,"e":11081,"s":10231,"text":"We then add a bean of the UserDetailsService to retrieve the user details for authentication and authorization. To put it in the Spring context we annotate it with @Bean. To keep this tutorial simple and easy to understand, we use an InMemoryUserDetailsManager instance. For a real-world application, we can use other implementations like JdbcUserDetailsManager to connect to a database and so on. To be able to create users easily for this example we use the UserDetailsManager interface which extends the UserDetailsService and has methods like createUser(), updateUser() and so on. Then, we create a user using the builder class. We give him a username, password and a “read” authority for now. Then, using the createUser() method, we add the newly created user and return the instance of UserDetailsManager thus putting it in the Spring context."},{"code":null,"e":11780,"s":11081,"text":"To be able to use the UserDetailsService defined by us, it is necessary to provide a PasswordEncoder bean in the Spring context. Again, to keep it simple for now we use the NoOpPasswordEncoder. The NoOpPasswordEncoder should not be used otherwise for real-world applications for production as it is not secure. NoOpPasswordEncoder does not encode the password and is only useful for developing or testing scenarios or proof of concepts. We should always use the other highly secure options provided by Spring Security, the most popular of which is the BCryptPasswordEncoder, which we will be using later in our series of tutorials. 
To put it in the Spring context we annotate the method with @Bean."},{"code":null,"e":11971,"s":11780,"text":"We then override the AuthenticationManager bean method of WebSecurityConfigurerAdapter, which returns the authenticationManagerBean to put the authentication manager into the Spring context."},{"code":null,"e":12518,"s":11971,"text":"Now, to add the client configurations we add a new configuration class named AuthorizationServerConfig which extends AuthorizationServerConfigurerAdapter class of Spring Security. The AuthorizationServerConfigurerAdapter class is used to configure the authorization server using the spring security oauth2 module. We annotate this class with @Configuration as well. To add the authorization server functionality to this class we need to add the @EnableAuthorizationServer annotation so that the application can behave as an authorization server."},{"code":null,"e":13826,"s":12518,"text":"package com.tutorial.spring.security.oauth2.config; \nimport org.springframework.beans.factory.annotation.Autowired; \nimport org.springframework.context.annotation.Configuration; \nimport org.springframework.security.authentication.AuthenticationManager; \nimport org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer; \nimport org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter; \nimport org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer; \nimport org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer; @Configuration @EnableAuthorizationServer \npublic class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter {\n @Autowired private AuthenticationManager authenticationManager; \n @Override \n public void configure(ClientDetailsServiceConfigurer clients) throws Exception { \n clients.inMemory() .withClient(\"oauthclient1\") .secret(\"oauthsecret1\") .scopes(\"read\") .authorizedGrantTypes(\"password\") } \n @Override \n public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception { \n endpoints.authenticationManager(authenticationManager); \n } \n}"},{"code":null,"e":14077,"s":13826,"text":"For checking oauth tokens, Spring Security oauth exposes two endpoints – /oauth/check_token and /oauth/token_key. These endpoints are protected by default behind denyAll(). tokenKeyAccess() and checkTokenAccess() methods open these endpoints for use."},{"code":null,"e":14210,"s":14077,"text":"We autowire the AuthenticationManager bean we configured in the UserConfig class as a dependency here which we shall be using later."},{"code":null,"e":14832,"s":14210,"text":"We then override two of the configure() methods of the AuthorizationServerConfigurerAdapter to provide an in-memory implementation of the client details service. The first method which uses the ClientDetailsServiceConfigurer as a parameter, as the name suggests, allows us to configure the clients for the authorization server. These clients represent the applications that will be able to use the functionality of this authorization server. Since this is a basic application for learning the implementation of OAuth2, we will keep things simple for now and use an in-memory implementation with the following attributes −"},{"code":null,"e":14875,"s":14832,"text":"clientId − the id of the client. Required."},{"code":null,"e":14918,"s":14875,"text":"clientId − the id of the client. 
Required."},{"code":null,"e":14975,"s":14918,"text":"secret − the client secret, required for trusted clients"},{"code":null,"e":15032,"s":14975,"text":"secret − the client secret, required for trusted clients"},{"code":null,"e":15178,"s":15032,"text":"scope − the limiting scope of the client, in other words, client permissions. If left empty or undefined, the client is not limited by any scope."},{"code":null,"e":15324,"s":15178,"text":"scope − the limiting scope of the client, in other words, client permissions. If left empty or undefined, the client is not limited by any scope."},{"code":null,"e":15633,"s":15324,"text":"authorizedGrantTypes − the grant types that the client is authorized to use. The grant type denotes the way by which the client obtains the token from the authorization server. We will be using the “password” grant type as it is the simplest. Later, we shall be using another grant type for another use-case."},{"code":null,"e":15942,"s":15633,"text":"authorizedGrantTypes − the grant types that the client is authorized to use. The grant type denotes the way by which the client obtains the token from the authorization server. We will be using the “password” grant type as it is the simplest. Later, we shall be using another grant type for another use-case."},{"code":null,"e":16187,"s":15942,"text":"In “password” authorization grant type, the user needs to provide his/her username, password and scope to our client application, which then uses those credentials along with its credentials for the authorization server we want the tokens from."},{"code":null,"e":16377,"s":16187,"text":"The other configure() method that we overrode, uses AuthorizationServerEndpointsConfigurer as a parameter, is used to attach the AuthenticationManager to authorization server configuration."},{"code":null,"e":16581,"s":16377,"text":"With these basic configurations, our Authorization server is ready to use. Let’s go ahead and start it and use it. We will be using Postman ( https://www.postman.com/downloads/ ) for making our requests."},{"code":null,"e":16855,"s":16581,"text":"When using STS, we can launch our application and start seeing see the logs in our console. When the application starts, we can find the oauth2 endpoints exposed by our application in the console. Of those endpoints, we will be using the following the below token for now −"},{"code":null,"e":16895,"s":16855,"text":"/oauth/token – for obtaining the token."},{"code":null,"e":16990,"s":16895,"text":"If we check the postman snapshot here, we can notice a few things. Let’s list them down below."},{"code":null,"e":17231,"s":16990,"text":"The URL − Our Spring Boot Application is running at port 8080 of our local machine, so the request is pointed to http://localhost:8080. The next part is /oauth/token, which we know, is the endpoint exposed by OAuth for generating the token."},{"code":null,"e":17506,"s":17231,"text":"The query params− Since this is a “password” authorization grant type, the user needs to provide his/her username, password and scope to our client application, which then uses those credentials along with its credentials to the authorization server we want the tokens from."},{"code":null,"e":17773,"s":17506,"text":"Client Authorization − The Oauth system requires the client to be authorized to be able to provide the token. 
Hence, under the Authorization header, we provide the client authentication information, namely username and password that we configured in our application."},{"code":null,"e":17849,"s":17773,"text":"Let’s take a closer look at the query params and the authorization header −"},{"code":null,"e":17866,"s":17849,"text":"The query params"},{"code":null,"e":17885,"s":17866,"text":"Client credentials"},{"code":null,"e":17999,"s":17885,"text":"If everything is correct, we shall be able to see our generated token in the response along with a 200 ok status."},{"code":null,"e":18012,"s":17999,"text":"The response"},{"code":null,"e":18182,"s":18012,"text":"We can test our server, by putting wrong credentials or no credentials, and we will get back an error which would say the request is unauthorized or has bad credentials."},{"code":null,"e":18298,"s":18182,"text":"This is our basic oauth authorization server, that uses the password grant type to generate and provide a password."},{"code":null,"e":18496,"s":18298,"text":"Next, let’s implement a more secure, and a more common application of the oauth2 authentication, i.e. with an authorization code grant type. We will update our current application for this purpose."},{"code":null,"e":19068,"s":18496,"text":"The authorization grant type is different from the password grant type in the sense that the user doesn’t have to share his credentials with the client application. He shares them with the authorization server only and in return authorization code is sent to the client which it uses to authenticate the client. It is more secure than the password grant type as user credentials are not shared with the client application and hence the user’s information stays safe. The client application doesn’t get access to any important user information unless approved by the user."},{"code":null,"e":19194,"s":19068,"text":"In a few simple steps, we can set up a basic oauth server with an authorization grant type in our application. 
Let’s see how."},{"code":null,"e":20709,"s":19194,"text":"package com.tutorial.spring.security.oauth2.config; \nimport org.springframework.beans.factory.annotation.Autowired; \nimport org.springframework.context.annotation.Configuration; \nimport org.springframework.security.authentication.AuthenticationManager;\nimport org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer; \nimport org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter; \nimport org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer; \nimport org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer; \n@Configuration \n@EnableAuthorizationServer \npublic class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter { \n @Autowired private AuthenticationManager authenticationManager; \n @Override \n public void configure(ClientDetailsServiceConfigurer clients) throws Exception {\n clients.inMemory() \n .withClient(\"oauthclient1\") \n .secret(\"oauthsecret1\")\n .scopes(\"read\") .authorizedGrantTypes(\"password\") \n .and() .withClient(\"oauthclient2\") .secret(\"oauthsecret2\") \n .scopes(\"read\") .authorizedGrantTypes(\"authorization_code\") \n .redirectUris(\"http://locahost:9090\"); \n }\n @Override public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception { \n endpoints.authenticationManager(authenticationManager); \n } \n}"},{"code":null,"e":21039,"s":20709,"text":"Let’s add a second client for this operation oauthclient2 for this operation with a new secret and read scope. Here we have changed the grant type to authorization code for this client. We also added a redirect URI so that the authorization server can callback the client. So, basically the redirect URI is the URI of the client."},{"code":null,"e":21397,"s":21039,"text":"Now, we have to establish a connection between the user and the authorization server. We have to set an interface for the authorization server where the user can provide the credentials. We use the formLogin() implementation of Spring Security to achieve that functionality while keeping things simple. 
package com.tutorial.spring.security.oauth2.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.crypto.password.NoOpPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.provisioning.InMemoryUserDetailsManager;
import org.springframework.security.provisioning.UserDetailsManager;

@SuppressWarnings("deprecation")
@Configuration
public class UserConfig extends WebSecurityConfigurerAdapter {

    // In-memory user store with a single demo user.
    @Bean
    public UserDetailsService userDetailsService() {
        UserDetailsManager userDetailsManager = new InMemoryUserDetailsManager();
        UserDetails user = User.withUsername("john")
            .password("12345")
            .authorities("read")
            .build();
        userDetailsManager.createUser(user);
        return userDetailsManager;
    }

    // For demonstration only; never use NoOpPasswordEncoder in production.
    @Bean
    public PasswordEncoder passwordEncoder() {
        return NoOpPasswordEncoder.getInstance();
    }

    @Override
    @Bean
    public AuthenticationManager authenticationManagerBean() throws Exception {
        return super.authenticationManagerBean();
    }

    // Form login gives the user an interface for entering credentials;
    // every request must be authenticated.
    @Override
    protected void configure(HttpSecurity http) throws Exception {
        http.formLogin();
        http.authorizeRequests().anyRequest().authenticated();
    }
}

This completes our setup for the authorization code grant type. Now, to test the setup, we launch the application and point the browser at http://localhost:8080/oauth/authorize?response_type=code&client_id=oauthclient2&scope=read. We will be redirected to the default form login page of Spring Security.
Here, the response type code implies that the authorization server will return an authorization code, which the client will use to log in. When we log in with the user credentials, we will be asked whether we want to grant the permissions requested by the client, on a screen similar to the one shown below.
If we approve and click Authorize, we shall see that we are redirected to our given redirect URI along with the authorization code. In our case, we are redirected to http://localhost:9090/?code=7Hibnw, as we specified in the application. We can now act as the client in Postman and use the code to log in to the authorization server.
As we can see here, we have used the code received from the authorization server in our URL, with grant_type as authorization_code and scope as read. We acted as the client and provided the client credentials as configured in our application. When we make this request, we get back our access_token, which we can use further.
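Written as code rather than a Postman call, the exchange might look like the sketch below (the same illustrative RestTemplate approach and imports as the earlier sketch; the code value 7Hibnw is the one from the example redirect above and changes on every authorization):

// Sketch: exchanging the authorization code for an access token.
HttpHeaders headers = new HttpHeaders();
headers.setBasicAuth("oauthclient2", "oauthsecret2");  // the second client's credentials
headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);

MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
params.add("grant_type", "authorization_code");
params.add("code", "7Hibnw");  // the one-time code received on the redirect URI
params.add("scope", "read");

ResponseEntity<String> response = new RestTemplate().postForEntity(
        "http://localhost:8080/oauth/token",
        new HttpEntity<>(params, headers), String.class);
System.out.println(response.getBody());  // contains the access_token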
So, we have seen how we can configure Spring Security with OAuth 2.0. The application is simple enough to keep the whole process easy to follow. We have used two kinds of authorization grant types and seen how we can use them to acquire access tokens for our client application.
How to Migrate Your Python Machine Learning model to Other Languages | by Roman Orac | Towards Data Science

I recently worked on a project where I needed to train a Machine Learning model that would run on the Edge, meaning the processing and prediction happen on the device that collects the data.
As usual, I did my Machine Learning part in Python, and I hadn't thought much about how we were going to port my ML work to the edge device, which was written in Java.
When the modeling part was nearing the end, I started researching how to load a LightGBM model in Java. Prior to this, I had a discussion with a colleague, who recommended that I retrain the model with XGBoost, which can be loaded in Java with the XGBoost4J dependency.
LightGBM and XGBoost are both gradient boosting libraries with a few differences. I would expect to get a similar model if I retrained with XGBoost, but I didn't want to rerun all the experiments; there had to be a better way.
To my luck, I found a simple way to load a Machine Learning model trained in Python into almost any other language.
By reading this article, you'll learn:
What is PMML?
How to save a Python model to PMML format?
How to load the PMML model in Java?
How to make predictions with the PMML model in Java?
From Wikipedia:
The Predictive Model Markup Language (PMML) is an XML-based predictive model interchange format conceived by Dr. Robert Lee Grossman, then the director of the National Center for Data Mining at the University of Illinois at Chicago. PMML provides a way for analytic applications to describe and exchange predictive models produced by data mining and machine learning algorithms.
PMML supports:
Neural Networks
Support Vector Machines
Association rules
Naive Bayes classifier
Clustering models
Text models
Decision trees (Random forest)
Gradient Boosting (LightGBM and XGBoost)
Regression models
PMML enables us to load a Machine Learning model that was trained in Python in Java, Go, C++, Ruby and others.
My first thought after learning about PMML was that I would need to radically refactor the code, which would make retraining the model with XGBoost look more feasible.
After thinking about it, I decided to give PMML a try. It has a well-maintained repository with clear instructions, which is always a good sign.
You can simply install the PMML package with:
pip install sklearn2pmml
The sklearn2pmml package is needed to export a Python Machine Learning model to the PMML format. Using it is simple: we just need to wrap the classifier with the PMMLPipeline class.
To make it easier for you, I wrote a simple gist that trains a LightGBM model on the Iris dataset and exports the model to PMML format. It performs the following steps:
Import the required Python packages.
Load the Iris dataset.
Split the Iris dataset into train and test sets.
Train a LightGBM model with PMML support; wrapping the model is the only required change in your code.
Measure the classification accuracy of the model.
And finally, save the model to the PMML format.
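The gist itself is embedded in the original post; a minimal reconstruction of those steps, assuming the standard scikit-learn, lightgbm and sklearn2pmml APIs (a sketch, not the author's exact gist), might look like this:

# Sketch of the export steps; assumes lightgbm, scikit-learn and sklearn2pmml are installed.
from lightgbm import LGBMClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.pipeline import PMMLPipeline

# Load the Iris dataset; as_frame keeps the original feature names,
# e.g. "sepal length (cm)", which the Java side later queries by name.
iris = load_iris(as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.3, random_state=42)

# Wrapping the classifier in a PMMLPipeline is the only change PMML support requires.
pipeline = PMMLPipeline([("classifier", LGBMClassifier())])
pipeline.fit(X_train, y_train)

# Measure classification accuracy on the held-out set.
print("accuracy:", pipeline.score(X_test, y_test))

# Save the fitted pipeline to the PMML format.
sklearn2pmml(pipeline, "model.pmml")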
The code above creates a PMML file, which is an XML file. The XML contains all the model details, as seen in the image below.
We trained the model with Python and exported it to the PMML format; now we need to load it in Java.
I created a minimalistic repository, LoadPMMLModel, on Github, which shows how to load a PMML model in Java.
The first step is to add the PMML evaluator dependency to pom.xml (I'm using the Maven dependency manager):

<dependency>
    <groupId>org.jpmml</groupId>
    <artifactId>pmml-evaluator</artifactId>
    <version>1.5.15</version>
</dependency>

The next step is to prepare the model's input. The evaluator setup at the top of the snippet below is an assumed sketch following the usual jpmml-evaluator pattern; the original post loads the model the same way in the LoadPMMLModel repository:

// Assumed setup: build an Evaluator from the exported PMML file.
Evaluator evaluator = new LoadingModelEvaluatorBuilder()
        .load(new File("model.pmml"))
        .build();
List<? extends InputField> inputFields = evaluator.getInputFields();
FieldName targetName = evaluator.getTargetFields().get(0).getName();

// Raw feature values, keyed by the names used during training.
Map<String, Double> features = new HashMap<>();
features.put("sepal length (cm)", 6.1);
features.put("sepal width (cm)", 2.8);
features.put("petal length (cm)", 4.7);
features.put("petal width (cm)", 1.2);

// Convert each raw value into a prepared FieldValue for the evaluator.
Map<FieldName, FieldValue> arguments = new LinkedHashMap<>();
for (InputField inputField : inputFields) {
    FieldName inputName = inputField.getName();
    Double value = features.get(inputName.toString());
    FieldValue inputValue = inputField.prepare(value);
    arguments.put(inputName, inputValue);
}

And query the model in Java for prediction:

// Evaluate the prepared arguments and decode the result into plain Java values.
Map<FieldName, ?> results = evaluator.evaluate(arguments);
Map<String, ?> resultRecord = EvaluatorUtil.decodeAll(results);
Integer yPred = (Integer) resultRecord.get(targetName.toString());
System.out.printf("Prediction is %d\n", yPred);
System.out.printf("PMML output %s\n", resultRecord);

With the code above, the prediction and the full decoded PMML output are printed to the console.
In my Machine Learning project, I used a regression boosting model.
To my surprise, the exported PMML model produced the same results, to the fifth decimal, as the model in Python.
I don't have anything bad to say about PMML, as it works reliably in production.
Remember, you don't need to copy-paste the code from this article, as I created the LoadPMMLModel repository on Github.
Please let me know what your thoughts are about PMML.
Follow me on Twitter, where I regularly tweet about Data Science and Machine Learning.
How to change file extension in Python?

When changing the extension, you're basically just renaming the file with a different extension. To do that, you need to split off the filename's last extension and replace it with the new one you want. You can do this with os.path.splitext together with os.rename:
"},{"code":null,"e":1441,"s":1321,"text":">>> import os\n>>> my_file = 'my_file.txt'\n>>> base = os.path.splitext(my_file)[0]\n>>> os.rename(my_file, base + '.bin')"},{"code":null,"e":1485,"s":1441,"text":"This will rename my_file.txt to my_file.bin"}],"string":"[\n {\n \"code\": null,\n \"e\": 1321,\n \"s\": 1062,\n \"text\": \"When changing the extension, you're basically just renaming the file and changing the extension. In order to do that, you need to split the filename by '.' and replace the last entry by the new extension you want. You can do this using the os.rename method. \"\n },\n {\n \"code\": null,\n \"e\": 1441,\n \"s\": 1321,\n \"text\": \">>> import os\\n>>> my_file = 'my_file.txt'\\n>>> base = os.path.splitext(my_file)[0]\\n>>> os.rename(my_file, base + '.bin')\"\n },\n {\n \"code\": null,\n \"e\": 1485,\n \"s\": 1441,\n \"text\": \"This will rename my_file.txt to my_file.bin\"\n }\n]"}}},{"rowIdx":55,"cells":{"title":{"kind":"string","value":"How to Create an Infinite Loop in Windows Batch File? - GeeksforGeeks"},"text":{"kind":"string","value":"23 Mar, 2020\nAn infinite loop in Batch Script refers to the repetition of a command infinitely. The only way to stop an infinitely loop in Windows Batch Script is by either pressing Ctrl + C or by closing the program.\nSyntax: Suppose a variable ‘a’\n:a\nyour command here\ngoto a\n\nHere, you need to know how to create a batch file in windows. It is very simple. First, copy the code in a notepad file and save this file with .bat extension. To run or execute the file, double click on it or type the file name on cmd.\nExample 1: Let’s start by looping a simple command, such as ‘echo’. ‘echo‘ commands is analogous to ‘print’ command like in any other programming languages. Save the below code in a notepad file like sample.bat and double click on it to execute.\n@echo off\n:x\necho Hello! My fellow GFG Members!\ngoto x\n\nOutput:\nTo stop this infinite loop, press Ctrl + C and then press y and then Enter.\nExample 2: Suppose we want to loop the command ‘tree’. ‘tree’ command pulls and shows directory and file path in the form of a branching tree.\n@echo off REM turning off the echo-ing of the commands below\ncolor 0a REM changing font color to light green\ncd c:\\ REM put the directory name of which you want the tree of in place of c\n:y REM you can add any other variable in place of y\ntree \ngoto y\n\nNote: ‘REM’ command is only used for typing comments in the batch script program, you can ignore them while typing the program. They are only put for the understanding of the program script and have no real use in the program. 
Here you can see the same script without the comments:

@echo off 
color 0a 
cd c:\ 
:y 
tree 
goto y

Output: the directory tree is printed over and over until you stop the loop.
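As a cross-language aside (not part of the original article), the same run-until-Ctrl+C pattern in Python is a while True loop:

try:
    while True:
        print("Hello! My fellow GFG Members!")
except KeyboardInterrupt:
    # Ctrl + C raises KeyboardInterrupt, which ends the loop cleanly.
    print("Loop stopped.")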
Broken Pipe Error in Python - GeeksforGeeks

23 Sep, 2021

In this article, we will discuss the broken pipe error in Python: how the error occurs and what to do to rectify it.

With the advancement of emerging technologies in the IT sector, the choice of programming language plays a vital role, since the right language lets problems be solved quickly. Python has emerged as a particularly important language here because of its simplicity and the availability of various libraries.
But along with execution come errors, and it can be difficult for programmers to rectify those errors while working on a problem.

A broken pipe error is generally an input/output error that occurs at the Linux system level during reading and writing of files, mainly during file operations. The corresponding Linux error code is EPIPE, but every library function that returns this error code also receives a signal called SIGPIPE, and this signal terminates the program if it is not handled or blocked. Thus a program will never see the EPIPE error unless it has handled or blocked SIGPIPE.

The Python interpreter does not ignore SIGPIPE by default; instead, it converts the signal into an exception and raises an IOError (input/output error), also known as 'Error 32' or the broken pipe error.

python <script_name>.py | head

The pipeline above creates an upstream process that sends data and a downstream process (head) that reads it. When the downstream process stops reading, the upstream process receives SIGPIPE on its next write, and the upstream Python program raises IOError: Broken pipe.

Example:

Python3

for i in range(4000):
    print(i)

When we run this file from the Unix command line:

python3 main.py | head -n3000

Approach 1: To avoid the error, we can keep the interpreter from turning SIGPIPE into an exception by adding the code below at the top of the Python program.

from signal import signal, SIGPIPE, SIG_DFL 
signal(SIGPIPE,SIG_DFL) 

Python3

from signal import signal, SIGPIPE, SIG_DFL 

signal(SIGPIPE, SIG_DFL) 

for i in range(4000):
    print(i)

Output:

0
1
2
3
4
...
2999

Explanation:

The code placed at the top of the program redirects SIGPIPE to the default SIG_DFL handler, which simply terminates the writer when the pipe closes, so the rest of the pipeline runs seamlessly. But Approach 1 is not recommended, because the Python manual for the signal library mentions that this kind of signal handling should be avoided and should not be practiced in any part of the code.
So for this reason, we will go with the second approach.

Approach 2: We can handle this type of error with a try/except block, which is the error-handling mechanism the Python manual actually recommends.

import sys, errno 
try: 
    ...  # INPUT/OUTPUT operation
except IOError as e: 
    if e.errno == errno.EPIPE: 
        ...  # handling of the error

Example:

Python3

import sys
import errno 

try: 
    for i in range(4000): 
        print(i)
except IOError as e: 
    if e.errno == errno.EPIPE: 
        pass  # handling of the error

Output:

0
1
2
3
4
5
6
7
8
9

Explanation:

In the piece of code above, we used Python's built-in sys and errno modules together with a try/except block to catch the IOError raised when the pipe closes and handle it before it stops the program's execution.
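A side note beyond the original article: on Python 3, the failing write raises the dedicated BrokenPipeError exception (a subclass of OSError, which IOError now aliases), so it can be caught directly. The sketch below follows the general pattern the CPython documentation suggests for command-line pipelines; redirecting the remaining output to devnull avoids a second error when the interpreter flushes stdout at exit.

Python3

import os
import sys

try:
    for i in range(4000):
        print(i)
except BrokenPipeError:
    # Point stdout at devnull so the interpreter's final flush
    # of sys.stdout does not trigger another BrokenPipeError.
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stdout.fileno())
    sys.exit(1)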
Hadoop - Environment Setup

Hadoop is supported by the GNU/Linux platform and its flavors. Therefore, we have to install a Linux operating system to set up the Hadoop environment. In case you have an OS other than Linux, you can install VirtualBox and run Linux inside it.

Before installing Hadoop into the Linux environment, we need to set up Linux using ssh (Secure Shell).
Follow the steps given below for setting up the Linux environment.

At the beginning, it is recommended to create a separate user for Hadoop, to isolate the Hadoop file system from the Unix file system. Follow the steps given below to create a user:

Open the root account using the command "su".
Create a user from the root account using the command "useradd username".
Now you can open an existing user account using the command "su username".

Open the Linux terminal and type the following commands to create a user.

$ su 
 password: 
# useradd hadoop 
# passwd hadoop 
 New passwd: 
 Retype new passwd 

SSH setup is required to perform different operations on a cluster, such as starting, stopping, and distributed daemon shell operations. To authenticate different users of Hadoop, it is required to provide a public/private key pair for a Hadoop user and share it with different users.

The following commands generate a key pair using SSH, copy the public key from id_rsa.pub to authorized_keys, and give the owner read and write permissions on the authorized_keys file.

$ ssh-keygen -t rsa 
$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys 
$ chmod 0600 ~/.ssh/authorized_keys 

Java is the main prerequisite for Hadoop. First of all, you should verify the existence of Java in your system using the command "java -version". The syntax of the java version command is given below.

$ java -version 

If everything is in order, it will give you the following output.

java version "1.7.0_71" 
Java(TM) SE Runtime Environment (build 1.7.0_71-b13) 
Java HotSpot(TM) Client VM (build 25.0-b02, mixed mode) 

If Java is not installed in your system, then follow the steps given below to install it.

Download the Java JDK (the Linux x64 .tar.gz archive) by visiting www.oracle.com; jdk-7u71-linux-x64.tar.gz will then be downloaded onto your system.

Generally you will find the downloaded Java file in the Downloads folder. Verify it and extract the jdk-7u71-linux-x64.gz file using the following commands.

$ cd Downloads/ 
$ ls 
jdk-7u71-linux-x64.gz 

$ tar zxf jdk-7u71-linux-x64.gz 
$ ls 
jdk1.7.0_71 jdk-7u71-linux-x64.gz 

To make Java available to all users, you have to move it to the location "/usr/local/".
Open the root account and type the following commands.

$ su 
password: 
# mv jdk1.7.0_71 /usr/local/ 
# exit 

To set up the PATH and JAVA_HOME variables, add the following commands to the ~/.bashrc file.

export JAVA_HOME=/usr/local/jdk1.7.0_71 
export PATH=$PATH:$JAVA_HOME/bin 

Now apply all the changes to the currently running system.

$ source ~/.bashrc

Use the following commands to configure the Java alternatives:

# alternatives --install /usr/bin/java java usr/local/java/bin/java 2
# alternatives --install /usr/bin/javac javac usr/local/java/bin/javac 2
# alternatives --install /usr/bin/jar jar usr/local/java/bin/jar 2

# alternatives --set java usr/local/java/bin/java
# alternatives --set javac usr/local/java/bin/javac
# alternatives --set jar usr/local/java/bin/jar

Now verify the installation with the java -version command from the terminal, as explained above.

Download and extract Hadoop 2.4.1 from the Apache Software Foundation using the following commands.

$ su 
password: 
# cd /usr/local 
# wget http://apache.claz.org/hadoop/common/hadoop-2.4.1/hadoop-2.4.1.tar.gz 
# tar xzf hadoop-2.4.1.tar.gz 
# mkdir -p hadoop 
# mv hadoop-2.4.1/* hadoop/ 
# exit 

Once you have downloaded Hadoop, you can operate your Hadoop cluster in one of the three supported modes:

Local/Standalone Mode − After downloading Hadoop in your system, by default it is configured in standalone mode and can be run as a single Java process.

Pseudo-Distributed Mode − A distributed simulation on a single machine. Each Hadoop daemon, such as hdfs, yarn, and MapReduce, runs as a separate Java process. This mode is useful for development.

Fully Distributed Mode − This mode is fully distributed, with a minimum of two or more machines as a cluster. We will come across this mode in detail in the coming chapters.

Here we will discuss the installation of Hadoop 2.4.1 in standalone mode.

There are no daemons running and everything runs in a single JVM. Standalone mode is suitable for running MapReduce programs during development, since it is easy to test and debug them.

You can set the Hadoop environment variables by appending the following command to the ~/.bashrc file.

export HADOOP_HOME=/usr/local/hadoop 

Before proceeding further, you need to make sure that Hadoop is working fine. Just issue the following command:

$ hadoop version 

If everything is fine with your setup, then you should see the following result:

Hadoop 2.4.1 
Subversion https://svn.apache.org/repos/asf/hadoop/common -r 1529768 
Compiled by hortonmu on 2013-10-07T06:28Z 
Compiled with protoc 2.5.0
From source with checksum 79e53ce7994d1628b240f09af91e1af4 

It means your Hadoop standalone mode setup is working fine. By default, Hadoop is configured to run in non-distributed mode on a single machine.

Let's check a simple example of Hadoop.
The Hadoop installation delivers the following example MapReduce jar file, which provides basic MapReduce functionality and can be used for calculations such as the value of Pi or word counts over a given list of files.

$HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar 

Let's have an input directory where we push a few files, with the requirement of counting the total number of words in those files. To calculate the total number of words, we do not need to write our own MapReduce job, since the .jar file above contains an implementation of word count. You can try other examples using the same .jar file; just issue the following command to list the MapReduce programs supported by hadoop-mapreduce-examples-2.2.0.jar.

$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar 

Step 1 − Create temporary content files in the input directory. You can create this input directory anywhere you would like to work.

$ mkdir input 
$ cp $HADOOP_HOME/*.txt input 
$ ls -l input 

It will give the following files in your input directory:

total 24 
-rw-r--r-- 1 root root 15164 Feb 21 10:14 LICENSE.txt 
-rw-r--r-- 1 root root 101 Feb 21 10:14 NOTICE.txt
-rw-r--r-- 1 root root 1366 Feb 21 10:14 README.txt 

These files have been copied from the Hadoop installation home directory. For your experiment, you can use different and larger sets of files.

Step 2 − Start the Hadoop process to count the total number of words in all the files available in the input directory, as follows:

$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar wordcount input output 

Step 3 − Step 2 does the required processing and saves the output in the output/part-r-00000 file, which you can check by using:

$ cat output/* 

It will list all the words along with their total counts in all the files available in the input directory.

"AS 4 
"Contribution" 1 
"Contributor" 1 
"Derivative 1
"Legal 1
"License" 1
"License"); 1 
"Licensor" 1
"NOTICE” 1 
"Not 1 
"Object" 1 
"Source” 1 
"Work” 1 
"You" 1 
"Your") 1 
"[]" 1 
"control" 1 
"printed 1 
"submitted" 1 
(50%) 1 
(BIS), 1 
(C) 1 
(Don't) 1 
(ECCN) 1 
(INCLUDING 2 
(INCLUDING, 2 
.............
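An aside beyond the original tutorial: the same word count can be written in Python and run through Hadoop Streaming, which pipes input lines through arbitrary mapper and reducer scripts. The sketch below assumes the standard streaming contract (tab-separated key/value lines on stdin/stdout, with reducer input sorted by key); the file name and the "map"/"reduce" switch are illustrative.

#!/usr/bin/env python3
# wordcount_streaming.py - run with argument "map" as the mapper
# and with argument "reduce" as the reducer.
import sys

def mapper():
    # Emit "word<TAB>1" for every word read from stdin.
    for line in sys.stdin:
        for word in line.split():
            print(word + "\t1")

def reducer():
    # Streaming sorts mapper output by key, so counts for a word arrive contiguously.
    current, count = None, 0
    for line in sys.stdin:
        word, _, n = line.rstrip("\n").partition("\t")
        if word != current:
            if current is not None:
                print(current + "\t" + str(count))
            current, count = word, 0
        count += int(n)
    if current is not None:
        print(current + "\t" + str(count))

if __name__ == "__main__":
    mapper() if sys.argv[1] == "map" else reducer()

The job would then be submitted with the hadoop-streaming jar that ships with the distribution, passing this script as both the mapper and the reducer (the exact jar path varies by version).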
\ncore-site.xml\nThe core-site.xml file contains information such as the port number used for Hadoop instance, memory allocated for the file system, memory limit for storing the data, and size of Read/Write buffers.\nOpen the core-site.xml and add the following properties in between , tags.\n\n \n fs.default.name\n hdfs://localhost:9000 \n \n\n\nhdfs-site.xml\nThe hdfs-site.xml file contains information such as the value of replication data, namenode path, and datanode paths of your local file systems. It means the place where you want to store the Hadoop infrastructure.\nLet us assume the following data.\ndfs.replication (data replication value) = 1 \n\n(In the below given path /hadoop/ is the user name. \nhadoopinfra/hdfs/namenode is the directory created by hdfs file system.) \nnamenode path = //home/hadoop/hadoopinfra/hdfs/namenode \n\n(hadoopinfra/hdfs/datanode is the directory created by hdfs file system.) \ndatanode path = //home/hadoop/hadoopinfra/hdfs/datanode \n\nOpen this file and add the following properties in between the tags in this file.\n\n \n dfs.replication\n 1\n \n \n \n dfs.name.dir\n file:///home/hadoop/hadoopinfra/hdfs/namenode \n \n \n \n dfs.data.dir \n file:///home/hadoop/hadoopinfra/hdfs/datanode \n \n\n\nNote − In the above file, all the property values are user-defined and you can make changes according to your Hadoop infrastructure.\nyarn-site.xml\nThis file is used to configure yarn into Hadoop. Open the yarn-site.xml file and add the following properties in between the , tags in this file.\n\n \n yarn.nodemanager.aux-services\n mapreduce_shuffle \n \n\n\nmapred-site.xml\nThis file is used to specify which MapReduce framework we are using. By default, Hadoop contains a template of yarn-site.xml. First of all, it is required to copy the file from mapred-site.xml.template to mapred-site.xml file using the following command.\n$ cp mapred-site.xml.template mapred-site.xml \n\nOpen mapred-site.xml file and add the following properties in between the , tags in this file.\n\n \n mapreduce.framework.name\n yarn\n \n\n\nThe following steps are used to verify the Hadoop installation.\nSet up the namenode using the command “hdfs namenode -format” as follows.\n$ cd ~ \n$ hdfs namenode -format \n\nThe expected result is as follows.\n10/24/14 21:30:55 INFO namenode.NameNode: STARTUP_MSG: \n/************************************************************ \nSTARTUP_MSG: Starting NameNode \nSTARTUP_MSG: host = localhost/192.168.1.11 \nSTARTUP_MSG: args = [-format] \nSTARTUP_MSG: version = 2.4.1 \n...\n...\n10/24/14 21:30:56 INFO common.Storage: Storage directory \n/home/hadoop/hadoopinfra/hdfs/namenode has been successfully formatted. \n10/24/14 21:30:56 INFO namenode.NNStorageRetentionManager: Going to \nretain 1 images with txid >= 0 \n10/24/14 21:30:56 INFO util.ExitUtil: Exiting with status 0 \n10/24/14 21:30:56 INFO namenode.NameNode: SHUTDOWN_MSG: \n/************************************************************ \nSHUTDOWN_MSG: Shutting down NameNode at localhost/192.168.1.11 \n************************************************************/\n\nThe following command is used to start dfs. 
The following steps are used to verify the Hadoop installation.

Set up the namenode using the command "hdfs namenode -format" as follows.

$ cd ~ 
$ hdfs namenode -format 

The expected result is as follows.

10/24/14 21:30:55 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************ 
STARTUP_MSG: Starting NameNode 
STARTUP_MSG: host = localhost/192.168.1.11 
STARTUP_MSG: args = [-format] 
STARTUP_MSG: version = 2.4.1 
...
...
10/24/14 21:30:56 INFO common.Storage: Storage directory 
/home/hadoop/hadoopinfra/hdfs/namenode has been successfully formatted. 
10/24/14 21:30:56 INFO namenode.NNStorageRetentionManager: Going to 
retain 1 images with txid >= 0 
10/24/14 21:30:56 INFO util.ExitUtil: Exiting with status 0 
10/24/14 21:30:56 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************ 
SHUTDOWN_MSG: Shutting down NameNode at localhost/192.168.1.11 
************************************************************/

The following command is used to start dfs. Executing this command will start your Hadoop file system.

$ start-dfs.sh 

The expected output is as follows:

10/24/14 21:37:56 
Starting namenodes on [localhost] 
localhost: starting namenode, logging to /home/hadoop/hadoop-2.4.1/logs/hadoop-hadoop-namenode-localhost.out 
localhost: starting datanode, logging to /home/hadoop/hadoop-2.4.1/logs/hadoop-hadoop-datanode-localhost.out 
Starting secondary namenodes [0.0.0.0]

The following command is used to start the YARN script. Executing this command will start your YARN daemons.

$ start-yarn.sh 

The expected output is as follows:

starting yarn daemons 
starting resourcemanager, logging to /home/hadoop/hadoop-2.4.1/logs/yarn-hadoop-resourcemanager-localhost.out 
localhost: starting nodemanager, logging to /home/hadoop/hadoop-2.4.1/logs/yarn-hadoop-nodemanager-localhost.out 

The default port number to access Hadoop is 50070. Use the following URL to get Hadoop services in your browser.

http://localhost:50070/

The default port number to access all applications of the cluster is 8088. Use the following URL to visit this service.

http://localhost:8088/
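One last aside beyond the original tutorial: the two web UIs can be probed from a script using only the Python standard library. A minimal sketch, assuming the default ports quoted above:

from urllib.request import urlopen
from urllib.error import URLError

# Probe the NameNode (50070) and ResourceManager (8088) web UIs.
for name, url in (("NameNode UI", "http://localhost:50070/"),
                  ("ResourceManager UI", "http://localhost:8088/")):
    try:
        with urlopen(url, timeout=5) as resp:
            print(f"{name}: HTTP {resp.status}")
    except URLError as exc:
        print(f"{name}: not reachable ({exc.reason})")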
Copy the public keys form id_rsa.pub to authorized_keys, and provide the owner with read and write permissions to authorized_keys file respectively."},{"code":null,"e":3618,"s":3508,"text":"$ ssh-keygen -t rsa \n$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys \n$ chmod 0600 ~/.ssh/authorized_keys \n"},{"code":null,"e":3815,"s":3618,"text":"Java is the main prerequisite for Hadoop. First of all, you should verify the existence of java in your system using the command “java -version”. The syntax of java version command is given below."},{"code":null,"e":3833,"s":3815,"text":"$ java -version \n"},{"code":null,"e":3899,"s":3833,"text":"If everything is in order, it will give you the following output."},{"code":null,"e":4037,"s":3899,"text":"java version \"1.7.0_71\" \nJava(TM) SE Runtime Environment (build 1.7.0_71-b13) \nJava HotSpot(TM) Client VM (build 25.0-b02, mixed mode) \n"},{"code":null,"e":4133,"s":4037,"text":"If java is not installed in your system, then follow the steps given below for installing java."},{"code":null,"e":4229,"s":4133,"text":"Download java (JDK - X64.tar.gz) by visiting the following link www.oracle.com"},{"code":null,"e":4298,"s":4229,"text":"Then jdk-7u71-linux-x64.tar.gz will be downloaded into your system. "},{"code":null,"e":4451,"s":4298,"text":"Generally you will find the downloaded java file in Downloads folder. Verify it and extract the jdk-7u71-linux-x64.gz file using the following commands."},{"code":null,"e":4575,"s":4451,"text":"$ cd Downloads/ \n$ ls \njdk-7u71-linux-x64.gz \n\n$ tar zxf jdk-7u71-linux-x64.gz \n$ ls \njdk1.7.0_71 jdk-7u71-linux-x64.gz \n"},{"code":null,"e":4711,"s":4575,"text":"To make java available to all the users, you have to move it to the location “/usr/local/”. Open root, and type the following commands."},{"code":null,"e":4767,"s":4711,"text":"$ su \npassword: \n# mv jdk1.7.0_71 /usr/local/ \n# exit \n"},{"code":null,"e":4858,"s":4767,"text":"For setting up PATH and JAVA_HOME variables, add the following commands to ~/.bashrc file."},{"code":null,"e":4934,"s":4858,"text":"export JAVA_HOME=/usr/local/jdk1.7.0_71 \nexport PATH=$PATH:$JAVA_HOME/bin \n"},{"code":null,"e":4993,"s":4934,"text":"Now apply all the changes into the current running system."},{"code":null,"e":5013,"s":4993,"text":"$ source ~/.bashrc\n"},{"code":null,"e":5073,"s":5013,"text":"Use the following commands to configure java alternatives −"},{"code":null,"e":5435,"s":5073,"text":"# alternatives --install /usr/bin/java java usr/local/java/bin/java 2\n# alternatives --install /usr/bin/javac javac usr/local/java/bin/javac 2\n# alternatives --install /usr/bin/jar jar usr/local/java/bin/jar 2\n\n# alternatives --set java usr/local/java/bin/java\n# alternatives --set javac usr/local/java/bin/javac\n# alternatives --set jar usr/local/java/bin/jar\n"},{"code":null,"e":5510,"s":5435,"text":"Now verify the java -version command from the terminal as explained above."},{"code":null,"e":5606,"s":5510,"text":"Download and extract Hadoop 2.4.1 from Apache software foundation using the following commands."},{"code":null,"e":5792,"s":5606,"text":"$ su \npassword: \n# cd /usr/local \n# wget http://apache.claz.org/hadoop/common/hadoop-2.4.1/ \nhadoop-2.4.1.tar.gz \n# tar xzf hadoop-2.4.1.tar.gz \n# mv hadoop-2.4.1/* to hadoop/ \n# exit \n"},{"code":null,"e":5899,"s":5792,"text":"Once you have downloaded Hadoop, you can operate your Hadoop cluster in one of the three supported modes −"},{"code":null,"e":6055,"s":5899,"text":"Local/Standalone Mode − After downloading Hadoop in 
your system, by default, it is configured in a standalone mode and can be run as a single java process."},{"code":null,"e":6211,"s":6055,"text":"Local/Standalone Mode − After downloading Hadoop in your system, by default, it is configured in a standalone mode and can be run as a single java process."},{"code":null,"e":6416,"s":6211,"text":"Pseudo Distributed Mode − It is a distributed simulation on single machine. Each Hadoop daemon such as hdfs, yarn, MapReduce etc., will run as a separate java process. This mode is useful for development."},{"code":null,"e":6621,"s":6416,"text":"Pseudo Distributed Mode − It is a distributed simulation on single machine. Each Hadoop daemon such as hdfs, yarn, MapReduce etc., will run as a separate java process. This mode is useful for development."},{"code":null,"e":6789,"s":6621,"text":"Fully Distributed Mode − This mode is fully distributed with minimum two or more machines as a cluster. We will come across this mode in detail in the coming chapters."},{"code":null,"e":6957,"s":6789,"text":"Fully Distributed Mode − This mode is fully distributed with minimum two or more machines as a cluster. We will come across this mode in detail in the coming chapters."},{"code":null,"e":7031,"s":6957,"text":"Here we will discuss the installation of Hadoop 2.4.1 in standalone mode."},{"code":null,"e":7217,"s":7031,"text":"There are no daemons running and everything runs in a single JVM. Standalone mode is suitable for running MapReduce programs during development, since it is easy to test and debug them."},{"code":null,"e":7313,"s":7217,"text":"You can set Hadoop environment variables by appending the following commands to ~/.bashrc file."},{"code":null,"e":7352,"s":7313,"text":"export HADOOP_HOME=/usr/local/hadoop \n"},{"code":null,"e":7465,"s":7352,"text":"Before proceeding further, you need to make sure that Hadoop is working fine. Just issue the following command −"},{"code":null,"e":7484,"s":7465,"text":"$ hadoop version \n"},{"code":null,"e":7566,"s":7484,"text":"If everything is fine with your setup, then you should see the following result −"},{"code":null,"e":7781,"s":7566,"text":"Hadoop 2.4.1 \nSubversion https://svn.apache.org/repos/asf/hadoop/common -r 1529768 \nCompiled by hortonmu on 2013-10-07T06:28Z \nCompiled with protoc 2.5.0\nFrom source with checksum 79e53ce7994d1628b240f09af91e1af4 \n"},{"code":null,"e":7930,"s":7781,"text":"It means your Hadoop's standalone mode setup is working fine. By default, Hadoop is configured to run in a non-distributed mode on a single machine."},{"code":null,"e":8180,"s":7930,"text":"Let's check a simple example of Hadoop. Hadoop installation delivers the following example MapReduce jar file, which provides basic functionality of MapReduce and can be used for calculating, like Pi value, word counts in a given list of files, etc."},{"code":null,"e":8254,"s":8180,"text":"$HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar \n"},{"code":null,"e":8716,"s":8254,"text":"Let's have an input directory where we will push a few files and our requirement is to count the total number of words in those files. To calculate the total number of words, we do not need to write our MapReduce, provided the .jar file contains the implementation for word count. 
You can try other examples using the same .jar file; just issue the following commands to check supported MapReduce functional programs by hadoop-mapreduce-examples-2.2.0.jar file."},{"code":null,"e":8802,"s":8716,"text":"$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduceexamples-2.2.0.jar \n"},{"code":null,"e":8926,"s":8802,"text":"Create temporary content files in the input directory. You can create this input directory anywhere you would like to work."},{"code":null,"e":8988,"s":8926,"text":"$ mkdir input \n$ cp $HADOOP_HOME/*.txt input \n$ ls -l input \n"},{"code":null,"e":9047,"s":8988,"text":"It will give the following files in your input directory −"},{"code":null,"e":9220,"s":9047,"text":"total 24 \n-rw-r--r-- 1 root root 15164 Feb 21 10:14 LICENSE.txt \n-rw-r--r-- 1 root root 101 Feb 21 10:14 NOTICE.txt\n-rw-r--r-- 1 root root 1366 Feb 21 10:14 README.txt \n"},{"code":null,"e":9363,"s":9220,"text":"These files have been copied from the Hadoop installation home directory. For your experiment, you can have different and large sets of files."},{"code":null,"e":9493,"s":9363,"text":"Let's start the Hadoop process to count the total number of words in all the files available in the input directory, as follows −"},{"code":null,"e":9603,"s":9493,"text":"$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduceexamples-2.2.0.jar wordcount input output \n"},{"code":null,"e":9721,"s":9603,"text":"Step-2 will do the required processing and save the output in output/part-r00000 file, which you can check by using −"},{"code":null,"e":9737,"s":9721,"text":"$cat output/* \n"},{"code":null,"e":9860,"s":9737,"text":"It will list down all the words along with their total counts available in all the files available in the input directory."},{"code":null,"e":10281,"s":9860,"text":"\"AS 4 \n\"Contribution\" 1 \n\"Contributor\" 1 \n\"Derivative 1\n\"Legal 1\n\"License\" 1\n\"License\"); 1 \n\"Licensor\" 1\n\"NOTICE” 1 \n\"Not 1 \n\"Object\" 1 \n\"Source” 1 \n\"Work” 1 \n\"You\" 1 \n\"Your\") 1 \n\"[]\" 1 \n\"control\" 1 \n\"printed 1 \n\"submitted\" 1 \n(50%) 1 \n(BIS), 1 \n(C) 1 \n(Don't) 1 \n(ECCN) 1 \n(INCLUDING 2 \n(INCLUDING, 2 \n.............\n"},{"code":null,"e":10362,"s":10281,"text":"Follow the steps given below to install Hadoop 2.4.1 in pseudo distributed mode."},{"code":null,"e":10458,"s":10362,"text":"You can set Hadoop environment variables by appending the following commands to ~/.bashrc file."},{"code":null,"e":10798,"s":10458,"text":"export HADOOP_HOME=/usr/local/hadoop \nexport HADOOP_MAPRED_HOME=$HADOOP_HOME \nexport HADOOP_COMMON_HOME=$HADOOP_HOME \n\nexport HADOOP_HDFS_HOME=$HADOOP_HOME \nexport YARN_HOME=$HADOOP_HOME \nexport HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native \nexport PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin \nexport HADOOP_INSTALL=$HADOOP_HOME \n"},{"code":null,"e":10857,"s":10798,"text":"Now apply all the changes into the current running system."},{"code":null,"e":10878,"s":10857,"text":"$ source ~/.bashrc \n"},{"code":null,"e":11070,"s":10878,"text":"You can find all the Hadoop configuration files in the location “$HADOOP_HOME/etc/hadoop”. 
It is required to make changes in those configuration files according to your Hadoop infrastructure."},{"code":null,"e":11100,"s":11070,"text":"$ cd $HADOOP_HOME/etc/hadoop\n"},{"code":null,"e":11287,"s":11100,"text":"In order to develop Hadoop programs in java, you have to reset the java environment variables in hadoop-env.sh file by replacing JAVA_HOME value with the location of java in your system."},{"code":null,"e":11328,"s":11287,"text":"export JAVA_HOME=/usr/local/jdk1.7.0_71\n"},{"code":null,"e":11408,"s":11328,"text":"The following are the list of files that you have to edit to configure Hadoop. "},{"code":null,"e":11422,"s":11408,"text":"core-site.xml"},{"code":null,"e":11621,"s":11422,"text":"The core-site.xml file contains information such as the port number used for Hadoop instance, memory allocated for the file system, memory limit for storing the data, and size of Read/Write buffers."},{"code":null,"e":11728,"s":11621,"text":"Open the core-site.xml and add the following properties in between , tags."},{"code":null,"e":11870,"s":11728,"text":"\n \n fs.default.name\n hdfs://localhost:9000 \n \n\n"},{"code":null,"e":11884,"s":11870,"text":"hdfs-site.xml"},{"code":null,"e":12099,"s":11884,"text":"The hdfs-site.xml file contains information such as the value of replication data, namenode path, and datanode paths of your local file systems. It means the place where you want to store the Hadoop infrastructure."},{"code":null,"e":12133,"s":12099,"text":"Let us assume the following data."},{"code":null,"e":12498,"s":12133,"text":"dfs.replication (data replication value) = 1 \n\n(In the below given path /hadoop/ is the user name. \nhadoopinfra/hdfs/namenode is the directory created by hdfs file system.) \nnamenode path = //home/hadoop/hadoopinfra/hdfs/namenode \n\n(hadoopinfra/hdfs/datanode is the directory created by hdfs file system.) \ndatanode path = //home/hadoop/hadoopinfra/hdfs/datanode \n"},{"code":null,"e":12613,"s":12498,"text":"Open this file and add the following properties in between the tags in this file."},{"code":null,"e":13004,"s":12613,"text":"\n \n dfs.replication\n 1\n \n \n \n dfs.name.dir\n file:///home/hadoop/hadoopinfra/hdfs/namenode \n \n \n \n dfs.data.dir \n file:///home/hadoop/hadoopinfra/hdfs/datanode \n \n\n"},{"code":null,"e":13137,"s":13004,"text":"Note − In the above file, all the property values are user-defined and you can make changes according to your Hadoop infrastructure."},{"code":null,"e":13151,"s":13137,"text":"yarn-site.xml"},{"code":null,"e":13329,"s":13151,"text":"This file is used to configure yarn into Hadoop. Open the yarn-site.xml file and add the following properties in between the , tags in this file."},{"code":null,"e":13481,"s":13329,"text":"\n \n yarn.nodemanager.aux-services\n mapreduce_shuffle \n \n\n"},{"code":null,"e":13497,"s":13481,"text":"mapred-site.xml"},{"code":null,"e":13753,"s":13497,"text":"This file is used to specify which MapReduce framework we are using. By default, Hadoop contains a template of yarn-site.xml. 
hdfs-site.xml

The hdfs-site.xml file contains information such as the value of the replication data, the namenode path, and the datanode paths of your local file systems. It means the place where you want to store the Hadoop infrastructure.

Let us assume the following data.

dfs.replication (data replication value) = 1

(In the path given below, hadoop is the user name.
hadoopinfra/hdfs/namenode is the directory created by the hdfs file system.)
namenode path = /home/hadoop/hadoopinfra/hdfs/namenode

(hadoopinfra/hdfs/datanode is the directory created by the hdfs file system.)
datanode path = /home/hadoop/hadoopinfra/hdfs/datanode

Open this file and add the following properties in between the <configuration> and </configuration> tags.

<configuration>
   <property>
      <name>dfs.replication</name>
      <value>1</value>
   </property>
   <property>
      <name>dfs.name.dir</name>
      <value>file:///home/hadoop/hadoopinfra/hdfs/namenode</value>
   </property>
   <property>
      <name>dfs.data.dir</name>
      <value>file:///home/hadoop/hadoopinfra/hdfs/datanode</value>
   </property>
</configuration>

Note − In the above file, all the property values are user-defined and you can make changes according to your Hadoop infrastructure.

yarn-site.xml

This file is used to configure YARN into Hadoop. Open the yarn-site.xml file and add the following property in between the <configuration> and </configuration> tags.

<configuration>
   <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
   </property>
</configuration>

mapred-site.xml

This file is used to specify which MapReduce framework we are using. By default, Hadoop ships only a template of mapred-site.xml. First of all, you need to copy the file mapred-site.xml.template to mapred-site.xml using the following command.

$ cp mapred-site.xml.template mapred-site.xml

Open the mapred-site.xml file and add the following property in between the <configuration> and </configuration> tags.

<configuration>
   <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
   </property>
</configuration>
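To check that Hadoop actually sees the values you just wrote, you can query the effective configuration with hdfs getconf, a standard HDFS subcommand; this is shown as a quick sanity check, assuming the PATH settings from ~/.bashrc above:

$ hdfs getconf -confKey dfs.replication
1

It should echo back the value configured in hdfs-site.xml.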
The following steps are used to verify the Hadoop installation.

Set up the namenode using the command "hdfs namenode -format" as follows.

$ cd ~
$ hdfs namenode -format

The expected result is as follows.

10/24/14 21:30:55 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = localhost/192.168.1.11
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 2.4.1
...
...
10/24/14 21:30:56 INFO common.Storage: Storage directory
/home/hadoop/hadoopinfra/hdfs/namenode has been successfully formatted.
10/24/14 21:30:56 INFO namenode.NNStorageRetentionManager: Going to
retain 1 images with txid >= 0
10/24/14 21:30:56 INFO util.ExitUtil: Exiting with status 0
10/24/14 21:30:56 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost/192.168.1.11
************************************************************/

The following command is used to start dfs. Executing this command will start your Hadoop file system.

$ start-dfs.sh

The expected output is as follows −

10/24/14 21:37:56
Starting namenodes on [localhost]
localhost: starting namenode, logging to /home/hadoop/hadoop-2.4.1/logs/hadoop-hadoop-namenode-localhost.out
localhost: starting datanode, logging to /home/hadoop/hadoop-2.4.1/logs/hadoop-hadoop-datanode-localhost.out
Starting secondary namenodes [0.0.0.0]

The following command is used to start the YARN script. Executing this command will start your YARN daemons.

$ start-yarn.sh

The expected output is as follows −

starting yarn daemons
starting resourcemanager, logging to /home/hadoop/hadoop-2.4.1/logs/yarn-hadoop-resourcemanager-localhost.out
localhost: starting nodemanager, logging to /home/hadoop/hadoop-2.4.1/logs/yarn-hadoop-nodemanager-localhost.out

The default port number to access Hadoop is 50070. Use the following URL to get the Hadoop services in your browser.

http://localhost:50070/

The default port number to access all the applications of the cluster is 8088. Use the following URL to visit this service.

http://localhost:8088/
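At this point the JDK's jps utility gives a quick process-level check; on a healthy pseudo-distributed setup you should see entries like the following (the process ids will differ). The matching stop scripts shut the daemons down again when you are done:

$ jps
4856 NameNode
4983 DataNode
5151 SecondaryNameNode
5306 ResourceManager
5409 NodeManager
5701 Jps

$ stop-yarn.sh
$ stop-dfs.sh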
Creating fractals with Python. Try it yourself in this post! | by Dhanesh Budhrani | Towards Data Science

First of all, what is a geometric fractal? A geometric fractal is a geometric shape with a repeating structure at different scales: it doesn't matter whether you get closer to the image or not, you'll always see the same pattern. Or, as defined by Benoit Mandelbrot, "a rough or fragmented geometric shape that can be split into parts, each of which is (at least approximately) a reduced-size copy of the whole".

Now, how can we build a fractal in Python? Given that we are repeating a structure at different scales, we'll need to apply a recursive solution. Moreover, we'll be using turtle to draw the fractals.
In this post, we'll be drawing both a fractal tree and a Koch snowflake.

In order to create a tree, we are going to divide each branch into two sub-branches (left and right) and shorten the new sub-branches, until we reach a minimum branch length, defined by ourselves:

import turtle

MINIMUM_BRANCH_LENGTH = 5

def build_tree(t, branch_length, shorten_by, angle):
    pass

tree = turtle.Turtle()
tree.hideturtle()
tree.setheading(90)
tree.color('green')
build_tree(tree, 50, 5, 30)
turtle.mainloop()

So far, we've just defined the basics. We've imported turtle and created an instance of turtle.Turtle(), which will be the object moving around the canvas and drawing our tree. We've then made it face upwards with setheading(). We've also defined the signature of our recursive function, which will take the following parameters:

t: our Turtle instance.
branch_length: the current length of the branch in pixels.
shorten_by: determines by how many pixels the sub-branches will be shorter than the parent branch.
angle: the angle from which the sub-branches emerge from the parent branch.

Moreover, we've defined MINIMUM_BRANCH_LENGTH (in pixels), which sets the minimum threshold to create further sub-branches.

Let's now build the body of our recursive function:

import turtle

MINIMUM_BRANCH_LENGTH = 5

def build_tree(t, branch_length, shorten_by, angle):
    if branch_length > MINIMUM_BRANCH_LENGTH:
        t.forward(branch_length)
        new_length = branch_length - shorten_by
        t.left(angle)
        build_tree(t, new_length, shorten_by, angle)
        t.right(angle * 2)
        build_tree(t, new_length, shorten_by, angle)
        t.left(angle)
        t.backward(branch_length)

tree = turtle.Turtle()
tree.hideturtle()
tree.setheading(90)
tree.color('green')
build_tree(tree, 50, 5, 30)
turtle.mainloop()

As you can see, we reach our base case if branch_length is lower than MINIMUM_BRANCH_LENGTH. Otherwise, we draw the branch and proceed to create the sub-branches by computing their length, turning left and right by "angle" degrees, and calling build_tree again with the new values. Finally, we move backwards to the root of our branch.

If you execute the code, you should obtain a drawing of the fractal tree (the resulting image from the original post is not reproduced here).

Finally, feel free to play around with the code and the parameters!
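As a variation (my own sketch, not part of the original post), the same recursion can produce a more natural-looking tree by jittering the angle and the shortening step with the standard random module:

import random
import turtle

MINIMUM_BRANCH_LENGTH = 5

def build_random_tree(t, branch_length, shorten_by, angle):
    # Identical structure to build_tree, with randomized parameters.
    if branch_length > MINIMUM_BRANCH_LENGTH:
        t.forward(branch_length)
        new_length = branch_length - random.uniform(0.8, 1.2) * shorten_by
        jitter = random.uniform(-10, 10)  # degrees of random variation
        t.left(angle + jitter)
        build_random_tree(t, new_length, shorten_by, angle)
        t.right(2 * (angle + jitter))
        build_random_tree(t, new_length, shorten_by, angle)
        t.left(angle + jitter)  # restore the original heading
        t.backward(branch_length)

tree = turtle.Turtle()
tree.hideturtle()
tree.setheading(90)
tree.color('green')
build_random_tree(tree, 50, 5, 30)
turtle.mainloop()

Because every recursive call draws forward and then moves backward by the same amount, the net heading and position are restored even with the jitter applied.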
In the second section of this post we'll be drawing a more complex structure: the Koch snowflake.

First of all, we'll need to create a recursive function to create the Koch curve, and then we'll be joining 3 of these curves to create a snowflake. Let's start by defining the parameters of our recursive function:

t: our Turtle instance.
iterations: represents the value of n in the order-n snowflake figure from the original post (note that n=0 would represent a flat line, which will be the base case in our recursive function).
length: the length of each side in our current (sub-)snowflake.
shortening_factor: determines the factor by which the side length is divided when we create a new sub-snowflake.
angle: determines the angle from which the new side emerges.

Once we have defined the basic structure of our recursive function, we may reach the following point:

import turtle

def koch_curve(t, iterations, length, shortening_factor, angle):
    pass

t = turtle.Turtle()
t.hideturtle()
for i in range(3):
    koch_curve(t, 4, 200, 3, 60)
    t.right(120)
turtle.mainloop()

At this point, we just have to implement the recursive function. If we have reached our base case, we'll just draw a line. Otherwise, we'll update our parameters (specifically, iterations and length) and call our recursive function 4 times. Between these function calls we'll be turning first to the left, then to the right and finally to the left again. Let's see how the full implementation looks:

import turtle

def koch_curve(t, iterations, length, shortening_factor, angle):
    if iterations == 0:
        t.forward(length)
    else:
        iterations = iterations - 1
        length = length / shortening_factor
        koch_curve(t, iterations, length, shortening_factor, angle)
        t.left(angle)
        koch_curve(t, iterations, length, shortening_factor, angle)
        t.right(angle * 2)
        koch_curve(t, iterations, length, shortening_factor, angle)
        t.left(angle)
        koch_curve(t, iterations, length, shortening_factor, angle)

t = turtle.Turtle()
t.hideturtle()
for i in range(3):
    koch_curve(t, 4, 200, 3, 60)
    t.right(120)
turtle.mainloop()

If you execute the code above, you should obtain a Koch snowflake (again, the resulting image is not reproduced here).

Again, feel free to play around with the code and the parameters!
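One practical tweak, sketched here with the standard turtle API rather than taken from the original post: drawing a depth-4 curve move by move is slow because the screen refreshes after every segment. turtle.tracer(0, 0) disables the animation and turtle.update() renders the finished snowflake in one shot, reusing the koch_curve function defined above:

import turtle

turtle.tracer(0, 0)  # disable per-move screen refreshes

t = turtle.Turtle()
t.hideturtle()
for i in range(3):
    koch_curve(t, 4, 200, 3, 60)
    t.right(120)

turtle.update()      # draw everything at once
turtle.mainloop()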
How to convert Integer array list to float array in Java?

To convert an integer array list to a float array, let us first create an integer array list −

ArrayList<Integer> arrList = new ArrayList<Integer>();
arrList.add(25);
arrList.add(50);
arrList.add(100);
arrList.add(200);
arrList.add(300);
arrList.add(400);
arrList.add(500);
Now, convert the integer array list to a float array. We first set the size of the float array; with that, each and every value of the integer array list is assigned to the float array −

final float[] arr = new float[arrList.size()];
int index = 0;
for (final Integer value : arrList) {
   arr[index++] = value;
}

The following example puts it all together −

import java.util.ArrayList;
public class Demo {
   public static void main(String[] args) {
      ArrayList<Integer> arrList = new ArrayList<Integer>();
      arrList.add(25);
      arrList.add(50);
      arrList.add(100);
      arrList.add(200);
      arrList.add(300);
      arrList.add(400);
      arrList.add(500);
      final float[] arr = new float[arrList.size()];
      int index = 0;
      for (final Integer value : arrList) {
         arr[index++] = value;
      }
      System.out.println("Elements of float array...");
      for (float i : arr) {
         System.out.println(i);
      }
   }
}

This produces the following output −

Elements of float array...
25.0
50.0
100.0
200.0
300.0
400.0
500.0
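If boxed Float objects are acceptable instead of a primitive float[], a Java 8 stream gives a concise alternative. This is a sketch, not the approach used above; the class and variable names are illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class StreamDemo {
   public static void main(String[] args) {
      List<Integer> arrList = new ArrayList<>(Arrays.asList(25, 50, 100));
      // Map each Integer to its float value and collect into a Float[]
      Float[] boxed = arrList.stream()
                             .map(Integer::floatValue)
                             .toArray(Float[]::new);
      for (Float f : boxed) {
         System.out.println(f);
      }
   }
}

There is no FloatStream in the JDK, so a loop (as in the main example) remains the natural way to obtain a primitive float[].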
\strut - Tex Command

\strut - Used to create an invisible box with no width, height 8.6pt and depth 3pt.

{ \strut}

The \strut command creates an invisible box with no width, height 8.6pt and depth 3pt; it is typically used to force a minimum height and depth on a line.

\sqrt{(\ )}
\sqrt{\mathstrut\rm mathstrut}
\sqrt{\strut\rm strut}

The original page repeats these three radicals at \Tiny and \Large sizes; only garbled fragments of the rendered output survived extraction, so the rendered results are not reproduced here.
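A usage sketch based on the behavior described above, not an example from the original page: because the strut box has fixed height and depth, placing \strut in each row of an array forces uniform row heights even when the contents differ:

\begin{array}{c}
\strut \frac{a}{b} \\
\strut x + y
\end{array}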
Constants in C/C++ - GeeksforGeeks

03 Aug, 2021

As the name suggests, constants are those variables or values in the C/C++ programming language which cannot be modified once they are defined. They are fixed values in a program. There can be many types of constants: integer, float, octal, hexadecimal, character constants, and so on. Every constant type has a range; integers that are too big to fit into an int will be taken as long. The range also differs between signed and unsigned variants: for example, an 8-bit signed integer type ranges from -128 to +127, while its unsigned counterpart ranges from 0 to 255.

Defining Constants: In a C/C++ program we can define constants in two ways, as shown below:

1. Using the #define preprocessor directive
2. Using the const keyword

Literals: The values assigned to constant variables are referred to as literals. Generally, the two terms, constants and literals, are used interchangeably. For example, in "const int x = 5;", the value 5 is referred to as a constant integer literal. See the separate article on Types of Literals in C++.

Let us now learn about the above two ways in detail:

Using the #define preprocessor directive: This directive is used to declare an alias name for an existing variable or any value.
Using a const keyword: Using the const keyword to define constants is as simple as defining variables; the difference is that you have to precede the definition with the const keyword.

The below program shows how to use const to declare constants of different data types:

C

#include <stdio.h>

int main()
{
    // int constant
    const int intVal = 10;

    // Real constant
    const float floatVal = 4.14;

    // char constant
    const char charVal = 'A';

    // string constant
    const char stringVal[10] = "ABC";

    printf("Integer constant: %d\n", intVal);
    printf("Floating point constant: %.2f\n", floatVal);
    printf("Character constant: %c\n", charVal);
    printf("String constant: %s\n", stringVal);

    return 0;
}

C++

#include <iostream>
#include <string>
using namespace std;

int main()
{
    // int constant
    const int intVal = 10;

    // Real constant
    const float floatVal = 4.14;

    // char constant
    const char charVal = 'A';

    // string constant
    const string stringVal = "ABC";

    cout << "Integer Constant: " << intVal << "\n";
    cout << "Floating point Constant: " << floatVal << "\n";
    cout << "Character Constant: " << charVal << "\n";
    cout << "String Constant: " << stringVal << "\n";
    return 0;
}

Output:

Integer constant: 10
Floating point constant: 4.14
Character constant: A
String constant: ABC

Refer Const Qualifier in C for details.

This article is contributed by Chinmoy Lenka.
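As a closing note on const (a sketch of standard C semantics, not taken from the article above): with pointers, the placement of const determines what is protected −

C

#include <stdio.h>

int main()
{
    int a = 10, b = 20;

    const int *p1 = &a; // pointer to const int: *p1 cannot be modified through p1
    int *const p2 = &a; // const pointer to int: p2 must keep pointing to a

    p1 = &b;      // OK: the pointer itself can be reassigned
    // *p1 = 30;  // error: assignment of read-only location
    *p2 = 30;     // OK: the pointed-to value can change
    // p2 = &b;   // error: assignment of read-only variable p2

    printf("a = %d\n", a); // prints a = 30
    return 0;
}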
"},{"code":null,"e":25046,"s":24955,"text":"Defining Constants: In C/C++ program we can define constants in two ways as shown below: "},{"code":null,"e":25104,"s":25046,"text":"Using #define preprocessor directiveUsing a const keyword"},{"code":null,"e":25141,"s":25104,"text":"Using #define preprocessor directive"},{"code":null,"e":25163,"s":25141,"text":"Using a const keyword"},{"code":null,"e":25536,"s":25163,"text":"Literals: The values assigned to each constant variables are referred to as the literals. Generally, both terms, constants and literals are used interchangeably. For eg, “const int = 5;“, is a constant expression and the value 5 is referred to as constant integer literal. Refer here for various Types of Literals in C++.Let us now learn about above two ways in details: "},{"code":null,"e":26026,"s":25536,"text":"Using #define preprocessor directive: This directive is used to declare an alias name for existing variable or any value. We can use this to declare a constant as shown below:#define identifierName valueidentifierName: It is the name given to constant.value: This refers to any value assigned to identifierName.using a const keyword: Using const keyword to define constants is as simple as defining variables, the difference is you will have to precede the definition with a const keyword."},{"code":null,"e":26338,"s":26026,"text":"Using #define preprocessor directive: This directive is used to declare an alias name for existing variable or any value. We can use this to declare a constant as shown below:#define identifierName valueidentifierName: It is the name given to constant.value: This refers to any value assigned to identifierName."},{"code":null,"e":26367,"s":26338,"text":"#define identifierName value"},{"code":null,"e":26417,"s":26367,"text":"identifierName: It is the name given to constant."},{"code":null,"e":26477,"s":26417,"text":"value: This refers to any value assigned to identifierName."},{"code":null,"e":26656,"s":26477,"text":"using a const keyword: Using const keyword to define constants is as simple as defining variables, the difference is you will have to precede the definition with a const keyword."},{"code":null,"e":26739,"s":26656,"text":"Below program shows how to use const to declare constants of different data types:"},{"code":null,"e":26741,"s":26739,"text":"C"},{"code":null,"e":26745,"s":26741,"text":"C++"},{"code":"#include int main(){ // int constant const int intVal = 10; // Real constant const float floatVal = 4.14; // char constant const char charVal = 'A'; // string constant const char stringVal[10] = \"ABC\"; printf(\"Integer constant:%d \\n\", intVal ); printf(\"Floating point constant: %.2f\\n\", floatVal ); printf(\"Character constant: %c\\n\", charVal ); printf(\"String constant: %s\\n\", stringVal); return 0;}","e":27218,"s":26745,"text":null},{"code":"#include using namespace std; int main() { // int constant const int intVal = 10; // Real constant const float floatVal = 4.14; // char constant const char charVal = 'A'; // string constant const string stringVal = \"ABC\"; cout << \"Integer Constant: \" << intVal << \"\\n\"; cout << \"Floating point Constant: \" << floatVal << \"\\n\"; cout << \"Character Constant: \"<< charVal << \"\\n\"; cout << \"String Constant: \"<< stringVal << \"\\n\"; return 0; }","e":27735,"s":27218,"text":null},{"code":null,"e":27832,"s":27735,"text":"Integer constant: 10 \nFloating point constant: 4.14\nCharacter constant: A \nString constant: ABC "},{"code":null,"e":27872,"s":27832,"text":"Refer Const 
ggplot2 - Bubble Plots & Count Charts

Bubble plots are nothing but bubble charts, which are basically scatter plots with a third numeric variable used for the circle size.
In this chapter, we will focus on the creation of bar count plots and histogram count plots, which can be considered close relatives of bubble plots.

The following steps are used to create bubble plots and count charts with the ggplot2 package −

Load the package and the required dataset to create the bubble plots and count charts.

> # Load ggplot
> library(ggplot2)
>
> # Read in dataset
> data(mpg)
> head(mpg)
# A tibble: 6 x 11
manufacturer model displ year cyl trans drv cty hwy fl class
<chr> <chr> <dbl> <int> <int> <chr> <chr> <int> <int> <chr> <chr>
1 audi a4 1.8 1999 4 auto(l5) f 18 29 p compa~
2 audi a4 1.8 1999 4 manual(m5) f 21 29 p compa~
3 audi a4 2 2008 4 manual(m6) f 20 31 p compa~
4 audi a4 2 2008 4 auto(av) f 21 30 p compa~
5 audi a4 2.8 1999 6 auto(l5) f 16 26 p compa~
6 audi a4 2.8 1999 6 manual(m5) f 18 26 p compa~

The bar count plot can be created using the following command −

> # A bar count plot
> p <- ggplot(mpg, aes(x = factor(cyl))) +
+   geom_bar(stat = "count")
> p

The histogram count plot can be created using the following command −

> # A histogram count plot
> ggplot(data = mpg, aes(x = hwy)) +
+   geom_histogram(col = "red",
+      fill = "green",
+      alpha = .2,
+      binwidth = 5)

Now let us create the most basic bubble plot, by mapping a third numeric variable from the dataset, the engine displacement (displ), to the size of the points in the scatter plot −

ggplot(mpg, aes(x = cty, y = hwy, size = displ)) + geom_point(alpha = 0.7)

Each point now carries three values: city mileage on the x-axis, highway mileage on the y-axis, and engine displacement through the point size, with the size scale included in the legend.
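The basic bubble plot can be refined further; the sketch below (the colour mapping and the size range are our own choices, not part of the original example) distinguishes vehicle classes by colour and widens the size range so the bubbles are easier to compare −

# Refine the bubble plot: colour by vehicle class, explicit size range
ggplot(mpg, aes(x = cty, y = hwy, size = displ, color = class)) +
  geom_point(alpha = 0.6) +
  scale_size(range = c(1, 8), name = "displ")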
Google AMP - Html Page to Amp Page

In this chapter, we will understand how to convert a normal html page to an amp page. We will also validate the page for amp and check the output at the end.

To start with, let us take the normal html page as shown below −
<!DOCTYPE html>
<html>
   <head>
      <meta charset="utf-8">
      <title>Tutorials</title>
      <link href="style.css" rel="stylesheet" type="text/css">
      <script src="jquery.js"></script>
   </head>
   <body>
      <header>
         <h1>Tutorials</h1>
      </header>
      <article>
         <h2>Some Important Tutorials List</h2>
         <!-- the image file names below are placeholders -->
         <img src="images/tut1.png" alt="Tutorial 1" width="200" height="200">
         <img src="images/tut2.png" alt="Tutorial 2" width="200" height="200">
         <img src="images/tut3.png" alt="Tutorial 3" width="200" height="200">
         <img src="images/tut4.png" alt="Tutorial 4" width="200" height="200">
      </article>
   </body>
</html>
Note that we are using style.css in it; the details of the css file are as given here −

h1 {color: blue; text-align: center;}
h2 {text-align: center;}
img {
   border: 1px solid #ddd;
   border-radius: 4px;
   padding: 5px;
}
article {
   text-align: center;
}
header {
   width: 100%;
   height: 50px;
   margin: 5px auto;
   border: 1px solid #000000;
   text-align: center;
   background-color: #ccc;
}
footer {
   width: 100%;
   height: 35px;
   margin: 5px auto;
   border: 1px solid #000000;
   text-align: center;
   background-color: yellow;
}

Note that we have also used the jquery.js file in the .html listed above.

Now, host test.html locally and see the output at the link given here −

http://localhost:8080/googleamp/test.html

Now, let us go step by step to change the above test.html file into a test_amp.html file.

First, we have to save test.html as test_amp.html and follow the steps given below.

Step 1 − Add the amp library in the head section as shown below −

<script async src="https://cdn.ampproject.org/v0.js"></script>

For example, once added to test_amp.html, the head will be as follows −

<head>
   <meta charset="utf-8">
   <title>Tutorials</title>
   <link href="style.css" rel="stylesheet" type="text/css">
   <script src="jquery.js"></script>
   <script async src="https://cdn.ampproject.org/v0.js"></script>
</head>

Now run the page test_amp.html in the browser and open the browser console. It will display the console message as shown below −

To know if your html file is a valid amp page, add #development=1 to the end of your html page url as shown below −

http://localhost:8080/googleamp/test_amp.html#development=1

Hit the above url in the browser and open the Google Chrome console. It will list the errors which amp thinks are invalid from the amp specification point of view.

The errors we have got for test_amp.html are shown here −

Let us now fix them one by one till we get the amp successful message.

Step 2 − We can see the following error in the console −

We can fix that by adding ⚡ or amp to the html tag. We will add amp to the html tag as shown below −

<html amp>

Step 3 − Please make sure you have the meta tags with charset and name="viewport" in the head tag, as shown below −

<meta charset="utf-8">
<meta name="viewport" content="width=device-width,minimum-scale=1,initial-scale=1">

Step 4 − The next error that we have is shown here −

It says that the href in link rel=stylesheet, i.e. the following link, is throwing an error. This is because amp does not allow external stylesheets included with a link tag and href inside its pages.

<link href="style.css" rel="stylesheet" type="text/css">

We can add all the css from style.css as follows −

<style amp-custom>
   /* paste the full contents of style.css, shown above, here */
</style>

So the css data present in style.css has to be added in a style tag with the amp-custom attribute.

Add the style tag to your amp page. Let us now test the same with the above style tag in the browser. The changes we have done so far to test_amp.html are shown here −
<!doctype html>
<html amp>
   <head>
      <meta charset="utf-8">
      <meta name="viewport" content="width=device-width,minimum-scale=1,initial-scale=1">
      <title>Tutorials</title>
      <script src="jquery.js"></script>
      <script async src="https://cdn.ampproject.org/v0.js"></script>
      <style amp-custom>
         /* contents of style.css copied here, as described in Step 4 */
      </style>
   </head>
   <body>
      <header>
         <h1>Tutorials</h1>
      </header>
      <article>
         <h2>Some Important Tutorials List</h2>
         <!-- image file names are the same placeholders used in test.html -->
         <img src="images/tut1.png" alt="Tutorial 1" width="200" height="200">
         <img src="images/tut2.png" alt="Tutorial 2" width="200" height="200">
         <img src="images/tut3.png" alt="Tutorial 3" width="200" height="200">
         <img src="images/tut4.png" alt="Tutorial 4" width="200" height="200">
      </article>
   </body>
</html>
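While copying the stylesheet over, two rules that the AMP specification imposes on this block are worth keeping in mind (they are spec constraints, not errors the validator has shown us yet): a page may contain at most one style tag with the amp-custom attribute, and the css inside it must not use !important. For instance, with the first rules of style.css inlined −

<style amp-custom>
   /* at most one amp-custom block per page; !important is not allowed */
   h1 {color: blue; text-align: center;}
   h2 {text-align: center;}
</style>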
Let us see the output and errors in the console for the above page. Observe the following screenshot −

The error shown in the console is as follows −

Now you can see that some of the amp errors, the ones about style, are gone. Let us fix the remaining errors now.

Step 5 − The next error we see in the list is as follows −

We have added a script tag calling the jquery file. Note that amp pages do not allow any custom javascript in the page. We will have to remove it and make sure to use an amp component where one is available.

For example, we have amp-animation if any animation is required, and amp-analytics in case we want to add google analytics code to the page. Similarly, we have the amp-ad component to display ads on the page. There is also an amp-iframe component whose src we can point to the same origin, to call any custom javascript if required in the amp-iframe.

Now, let us remove the script tag from the page.

Step 6 − The next error displayed is shown here −

The above errors are pointing to the image tags we have used on the page. Amp does not allow <img> tags to be used inside the page. Note that we need to use the <amp-img> tag instead.

Let us replace each <img> tag with <amp-img> as shown here −
<!-- same placeholder file names as before; amp-img requires explicit width and height -->
<amp-img src="images/tut1.png" alt="Tutorial 1" width="200" height="200"></amp-img>
<amp-img src="images/tut2.png" alt="Tutorial 2" width="200" height="200"></amp-img>
<amp-img src="images/tut3.png" alt="Tutorial 3" width="200" height="200"></amp-img>
<amp-img src="images/tut4.png" alt="Tutorial 4" width="200" height="200"></amp-img>
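As a side note (this variant is standard amp-img usage, though not part of the original page), amp-img also supports a layout attribute; with layout="responsive", the width and height only fix the aspect ratio and the image scales to the width of its container −

<amp-img src="images/tut1.png" alt="Tutorial 1"
   width="200" height="200" layout="responsive"></amp-img>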
We have replaced all the <img> tags with <amp-img> as shown above. Now, let us run the page in the browser to see the output and errors −

Observe that the errors are getting fewer now.

Step 7 − The next error displayed in the console is as follows −

We need to add a link rel=canonical tag in the head section. Please note this is a mandatory tag and should always be added in the head. It points to the regular html version of the page, as follows −

<link rel="canonical" href="http://localhost:8080/googleamp/test.html">

Step 8 − The next error displayed in the console, for a missing noscript tag, is as shown here −

We need to add