{ observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\"\"\"\nsoup = BeautifulSoup(html_doc, \"html.parser\")\nprint(soup.prettify())\nsoup = BeautifulSoup(html_doc, \"html.parser\")\nh1_tag = soup.find(\"p\")\nprint(h1_tag.text)\nimport pandas as pd\nimport numpy as np\n\nnn = pd.DataFrame({\"name\": [\"Alice\", \"mois\", \"moin\"], \"age\": [12, 35, 56]})\nnn\narr = np.array([1, 2, 3, 4, 5])\narr_sqrt = arr**2\nprint(arr_sqrt)\narr_sqrt.mean()\nnp.arange(10)\nnazar = np.arange(10)\nmm = nazar.reshape(2, 5)\nprint(mm)\nmaster = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nhh = np.transpose(master)\nprint(hh)\nimport numpy as np\nimport pandas as pd\n\narr = np.array([1, 2, 3, 4, 5])\narr\narr = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nnn = np.transpose(arr)\nnn\nar = np.array([[9, 8, 0], [7, 3, 5]])\narr = ar.flatten()\nprint(arr)\narr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nrow = arr2d[1, 0:2]\nprint(row)\narr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])\nn = arr[0, :, :]\nprint(n)\nnn = np.array([[1, 2, 3], [4, 5, 6], [8, 9, 10]])\nt = nn[1, 0]\nprint(t)\nimport math\n\n\ndef is_prime(n):\n if n < 2:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\ndef find_prime_length_words(sentence):\n words = sentence.split()\n prime_length_words = []\n for word in words:\n if is_prime(len(word)):\n prime_length_words.append(word)\n return prime_length_words\n\n\n# Example usage\ninput_sentence = \"The quick brown fox jumps over the lazy dog.\"\noutput_words = find_prime_length_words(input_sentence)\nprint(\"Output:\", \" \".join(output_words))\nimport math\n\n\ndef is_prime(n):\n if n < 2:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\ndef find_prime_length_words(sentence):\n words = sentence.split()\n for word in words:\n if is_prime(len(word)):\n prime_length_words.append(words)\n return prime_length_words\n\n\n# Example usage\ninput_sentence = \"The quick brown fox jumps over the lazy dog\"\noutput_words = find_prime_length_words(input_sentence)\nprint(\"output:\", \"\".join(output_words))\n\n\ndef print_poem(poem):\n lines = poem.split(\"\\n\")\n for line in lines:\n line = line.strip()\n if line:\n print(\"\\t\" + line)\n print(\"\\t\\t\" + \"\\t\".join(line.split()))\n\n\n# Example usage\ninput_poem = \"\"\"Twinkle, twinkle, little star,\nHow I wonder what you are!\nUp above the world so high,\nLike a diamond in the sky.\nTwinkle, twinkle, little star,\nHow I wonder what you are!\"\"\"\nprint_poem(input_poem)\npoem = \"Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. Twinkle, twinkle, little star, How I wonder what you are!\"\n\n\ndef poem_print(poem):\n lines = poem.split(\"\\n\")\n for line in lines:\n line = line.strip()\n if line:\n print(\"\\t\" + line)\n print(\"\\t\\t\" + \"\\t\".join(line.strip()))\n\n\ninput_poem = \"Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. 
Twinkle, twinkle, little star, How I wonder what you are!\"\npoem_print(input_poem)\n\n\ndef send_email_greeting(first_name, last_name):\n greeting = \"Hello \" + last_name + \" \" + first_name\n return greeting\n\n\n# Example usage\nfirst_name = \"Dany\"\nlast_name = \"Boon\"\nemail_greeting = send_email_greeting(first_name, last_name)\nprint(email_greeting)\n\n\ndef master(first, last):\n m = \"Hello \" + last + \" \" + first\n return m\n\n\nfirst = \"Nazar\"\nlast = \"Mohammed\"\nemail = master(first, last)\nprint(email)\nr = 6\na = 4 / 3 * 3.142 * r**3\na\n\n\ndef volume(r):\n a = 4 / 3 * 3.1415926535897931 * r**3\n return a\n\n\nvolume(6)\nimport math\n\n\ndef calculate_sphere_volume(radius):\n volume = (4 / 3) * math.pi * radius**3\n return volume\n\n\nradius = 6.0\nsphere_volume = calculate_sphere_volume(radius)\nprint(sphere_volume)\nimport math\n\n\ndef calc(r):\n volume = (4 / 3) * math.pi * radius**3\n return volume\n\n\nr = 6\nsphere = calc(r)\nprint(sphere)\nimport math\n\n\ndef nazar(r):\n v = (4 / 3) * math.pi * r**3\n return v\n\n\ntt = 6.0\nv = nazar(tt)\nprint(v)\n\n\ndef pp(x, y):\n a = (x + y) * (x + y)\n return a\n\n\nx = int(input())\ny = int(input())\nnn = pp(x, y)\nprint(nn)\n\n\ndef cac(a, b, c):\n nn = a + b + c\n return nn\n\n\nmm = cac(1, 2, -8)\nprint(mm)\n\n\ndef cc(lst):\n total = sum(lst)\n return total\n\n\nlst = [1, 2, -8]\nresult = cc(lst)\nprint(result)\n\n\ndef multi(lst):\n result = 1\n for num in lst:\n result *= num\n return result\n\n\ninput_list = [1, 2, -8]\nresult = multi(input_list)\nprint(result)\nn = [50]\nfor i in n:\n if i % 2 == 0:\n print(\"It is a positive number\")\n else:\n print(\"It is a negative number\")\n\n\ndef positive(n=50):\n for i in n:\n if i % 2 == 0:\n print(\"It is a positive number\")\n else:\n print(\"It is a negative number\")\n\n\npositive(n)\nnum = float(input(\"Input a number: \"))\nif num > 0:\n print(\"It is positive number\")\nelif num == 0:\n print(\"It is Zero\")\nelse:\n print(\"It is a negative number\")\nnumber = float(input(\"Enter a number: \"))\nif number > 0:\n print(\"It is a positive number\")\nelif number < 0:\n print(\"It is a negative number\")\nelse:\n print(\"It is zero\")\nnum = float(input(\"Enter a num is:\"))\nif num > 0:\n print(\"It is a positive number\")\nelif num < 0:\n print(\"It is a negative number\")\nelse:\n print(\"It is zero\")\nnumber = [50]\nfor i in number:\n if i % 2 == 0:\n print(\"positive\")\n else:\n print(\"negative\")\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\na = [1, 2, 3, 4, 5, 8, 3, 9, 10]\nb = [2, 8, 5, 4, 3, 1, 0, 9, 4]\nplt.figure(figsize=(5, 3))\nplt.plot(a, b, color=\"red\")\nplt.xlabel(\"x-axis\")\nplt.ylabel(\"y-axis\")\nplt.title(\"Plotting the Graph\")\nplt.show()\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv(\"/kaggle/input/netflix-titles/netflix_titles.csv\")\ndf.head()\ndf.isnull().sum()\ndf1 = df.fillna(value=\"unknown\")\ndf1.isnull().sum()\ndf1.columns\n# Print the column names with spaces visible\nprint([col.strip() for col in df1.columns])\n# Reset the index\ndf1.reset_index(drop=True, inplace=True)\n# Print the column names\nprint(df1.columns)\n# Count the number of movies and TV shows\nmovie_count = df1[df1[\"type\"] == \"Movie\"].shape[0]\ntv_show_count = df1[df1[\"type\"] == \"TV Show\"].shape[0]\n# Create a bar chart\nlabels = [\"Movies\", \"TV Shows\"]\ncounts = [movie_count, tv_show_count]\nplt.bar(labels, 
counts)\nplt.xlabel(\"Content Type\")\nplt.ylabel(\"Count\")\nplt.title(\"Number of Movies vs TV Shows on Netflix\")\nplt.show()\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Count the occurrence of each genre\ngenre_counts = df1[\"listed_in\"].value_counts().head(10)\n# Create a bar chart\nplt.bar(genre_counts.index, genre_counts.values)\nplt.xlabel(\"Genre\")\nplt.ylabel(\"Count\")\nplt.title(\"Top 10 Most Popular Genres on Netflix\")\nplt.xticks(rotation=45, ha=\"right\") # Rotate x-axis labels for better readability\nplt.tight_layout() # Adjust spacing\nplt.show()\n# Create a box plot\nplt.boxplot(df1[\"rating\"])\nplt.ylabel(\"Rating\")\nplt.title(\"Distribution of Ratings on Netflix\")\nplt.show()\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = sns.load_dataset(\"titanic\")\ndf.head()\ndf.isnull().sum()\ndf.mean()\ndf1 = df.fillna(df.mean())\ndf1.isnull().sum()\ndf1[\"sex\"].unique()\n# Convert the data type of the Age column to numeric\ndf1[\"age\"] = pd.to_numeric(df1[\"age\"], errors=\"coerce\")\n# Verify the updated data type of the Age column\nprint(\"Data type of the Age column:\", df1[\"age\"].dtype)\n# Create a box plot to visualize the distribution of age\nsns.boxplot(x=\"age\", data=df1)\nplt.title(\"Distribution of Age\")\nplt.show()\n# Create a scatter plot to visualize the relationship between age and survival\nsns.scatterplot(x=\"age\", y=\"survived\", hue=\"sex\", data=df1)\nplt.title(\"Age vs. Survival\")\nplt.show()\n# Create a bar chart to show the count of passengers by gender\nsns.countplot(x=\"sex\", data=df1)\nplt.title(\"Passenger Count by Gender\")\nplt.show()\n# Create a count plot to show the count of survivors by gender\nsns.countplot(x=\"survived\", hue=\"sex\", data=df1)\nplt.title(\"Survivor Count by Gender\")\nplt.show()\n# Create a heatmap to visualize the relationship between age, gender, and survival\nage_gender_survival = df1.pivot_table(index=\"age\", columns=\"sex\", values=\"survived\")\nsns.heatmap(age_gender_survival, cmap=\"coolwarm\", annot=True, fmt=\".0%\", cbar=True)\nplt.title(\"Survival Rate by Age and Gender\")\nplt.show()\ndf = sns.load_dataset(\"iris\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Load the Iris dataset\ndf = sns.load_dataset(\"iris\")\n# Scatterplot: Sepal Length vs. Sepal Width\nsns.scatterplot(x=\"sepal_length\", y=\"sepal_width\", hue=\"species\", data=df)\nplt.title(\"Scatterplot: Sepal Length vs. 
Sepal Width\")\nplt.show()\n# Boxplot: Petal Length by Species\nsns.boxplot(x=\"species\", y=\"petal_length\", data=df)\nplt.title(\"Boxplot: Petal Length by Species\")\nplt.show()\n# Violinplot: Petal Width by Species\nsns.violinplot(x=\"species\", y=\"petal_width\", data=df)\nplt.title(\"Violinplot: Petal Width by Species\")\nplt.show()\n# Pairplot: Pairwise Relationships and Distributions\nsns.pairplot(df, hue=\"species\")\nplt.title(\"Pairplot: Pairwise Relationships and Distributions\")\nplt.show()\n# Heatmap: Correlation Matrix\ncorrelation = df.corr()\nsns.heatmap(correlation, annot=True, cmap=\"coolwarm\")\nplt.title(\"Correlation Heatmap\")\nplt.show()\n"},"local_path":{"kind":"string","value":"/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326221.ipynb"},"kaggle_dataset_name":{"kind":"null"},"kaggle_dataset_owner":{"kind":"null"},"kversion":{"kind":"string","value":"[{\"Id\": 129326221, \"ScriptId\": 37853564, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9, \"AuthorUserId\": 12421240, \"CreationDate\": \"05/12/2023 19:56:52\", \"VersionNumber\": 1.0, \"Title\": \"trend\", \"EvaluationDate\": \"05/12/2023\", \"IsChange\": true, \"TotalLines\": 1045.0, \"LinesInsertedFromPrevious\": 1045.0, \"LinesChangedFromPrevious\": 0.0, \"LinesUnchangedFromPrevious\": 0.0, \"LinesInsertedFromFork\": NaN, \"LinesDeletedFromFork\": NaN, \"LinesChangedFromFork\": NaN, \"LinesUnchangedFromFork\": NaN, \"TotalVotes\": 0}]"},"kversion_datasetsources":{"kind":"null"},"dataset_versions":{"kind":"null"},"datasets":{"kind":"null"},"users":{"kind":"null"},"script":{"kind":"string","value":"num = int(input(\"enter a num\"))\nsum = 0\nfor i in range(1, num):\n if num % i == 0:\n sum += i\nif sum == num:\n print(\"The number is a perfect number\")\nelse:\n print(\"The number is not a perfect number\")\na = input(\"Enter a string is:\")\nrev = a[::-1]\nif rev == a:\n print(\"The string is palindrome\")\nelse:\n print(\"The string is not palindrome\")\nnum = int(input(\"Enter a number\"))\nsum = 0\nfor i in range(1, num):\n if num % i == 0:\n sum += i\nif sum == num:\n print(\"The number is perfect\")\nelse:\n print(\"The numbers is not perfect\")\n# Let's take the input of the string\na = input(\"Enter a string\")\nb = input(\"Enter a string to remove\")\nre = a.replace(b, \"\")\nprint(re)\nstring1 = input(\"Enter a string\")\nstring2 = input(\"Enter a string\")\nindex = string1.rfind(string2)\nif index == -1:\n print(\"Index level is not found\")\nelse:\n print(\"Index level is:\", index)\nnum = input(\"Enter a number\")\nn = len(num)\nsum = 0\nfor i in num:\n sum += int(i) ** n\nif sum == int(num):\n print(\"The num is armstronge\")\nelse:\n print(\"The num is not armstronge\")\na = int(input(\"Enter a string\"))\nb = int(input(\"Enter a in 2nd string\"))\na = a + b\nb = a - b\na = a - b\nprint(\"After swapping of two numbers\")\nprint(a)\nprint(b)\nnum = int(input(\"Enter a number\"))\nsum = 0\nfor i in range(1, num):\n sum += i\nif sum == num:\n print(\"Say the number is perfect\")\nelse:\n print(\"The number is not perfect\")\nstring = input(\"Enter a string\")\nstring1 = input(\"Enter a sub string\")\nrev = string.rfind(string1)\nif rev == -1:\n print(\"The index is not found\")\nelse:\n print(\"The index lvl is\", rev)\nnum = input(\"Enter a number\")\nn = len(num)\nsum = 0\nfor i in num:\n sum += int(i) ** n\nif sum == int(num):\n print(\"The number is armstrong\")\nelse:\n print(\"The number is not armstrong\")\nnum1 = int(input(\"Enter a first number\"))\nnum2 = 
int(input(\"Enter b number\"))\nif num1 > num2:\n num1, num2 = num2, num1\nfor i in range(num2, num1 - 1, -1):\n if i % 2 == 0:\n print(\"The biggest even number is :\", num1, \"and\", num2, \"is\", i)\n break\nnum1 = int(input(\"Enter a number\"))\nnum2 = int(input(\"Enter b number\"))\nif num1 > num2:\n num1, num2 = num2, num1\nfor i in range(num2, num1 - 1, -1):\n if i % 2 == 0:\n print(\"The biggest even number is\", num1, \"and\", num2, \"is\", i)\n break\nn = int(input(\"Enter a number\"))\nharmonic_sum = 0\nfor i in range(1, n):\n harmonic_sum += 1 / i\nprint(\"The Harmonic sum of\", n - 1, \"is\", harmonic_sum)\nn = int(input(\"Enter a number is :\"))\nhar_sum = 0\nfor i in range(1, n):\n har_sum += 1 / i\nprint(\"The har_sum of n is\", n - 1, \"is\", har_sum)\n# Please ensure the platform IDE is in Python 3.x mode.\nnum = input(\"Enter a number: \")\nsteps = 0\nwhile len(num) > 1:\n num = str(sum(int(d) for d in num))\n steps += 1\n print(\"Step-{} Sum: {}\".format(steps, num))\nimport math\n\n\ndef is_prime(n):\n \"\"\"\n This function checks whether a given number is prime or not.\n \"\"\"\n if n <= 1:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\npassage = input(\"Enter the passage: \")\nwords = passage.split()\nprime_words = []\nfor word in words:\n if is_prime(len(word)):\n prime_words.append(word)\nprint(\"Prime words in the passage are: \")\nfor word in prime_words:\n print(word)\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(0, 10, 0.5)\ny = np.sin(x)\nplt.figure(figsize=(5, 2))\nplt.title(\"Sine Curve\")\nplt.subplot(121)\nplt.plot(x, y)\nplt.show()\nx = np.arange(0, 10, 0.5)\ny = np.cos(x)\nplt.figure(figsize=(5, 2))\nplt.title(\"By cose\")\nplt.subplot(122)\nplt.plot(x, y)\nplt.show()\nimport datetime as datetime\n\nnow = datetime.datetime.now()\nprint(now)\nmy_date = datetime.date(2020, 3, 23)\nprint(my_date)\nmy_time = datetime.time(12, 5, 9)\nprint(my_time)\nna = datetime.datetime(2022, 10, 14, 12, 30, 25)\nprint(na)\nfrom bs4 import BeautifulSoup\n\nhtml_doc = \"\"\"\n\n\n My Web Page\n\n\n

  <h1>Welcome to my Web Page</h1>\n  <p>This is some text.</p>\n  <ul>\n    <li>Item 1</li>\n    <li>Item 2</li>\n    <li>Item 3</li>\n  </ul>
\n\n\n\"\"\"\nsoup = BeautifulSoup(html_doc, \"html.parser\")\nprint(soup.prettify())\nsoup = BeautifulSoup(html_doc, \"html.parser\")\nh1_tag = soup.find(\"p\")\nprint(h1_tag.text)\nimport pandas as pd\nimport numpy as np\n\nnn = pd.DataFrame({\"name\": [\"Alice\", \"mois\", \"moin\"], \"age\": [12, 35, 56]})\nnn\narr = np.array([1, 2, 3, 4, 5])\narr_sqrt = arr**2\nprint(arr_sqrt)\narr_sqrt.mean()\nnp.arange(10)\nnazar = np.arange(10)\nmm = nazar.reshape(2, 5)\nprint(mm)\nmaster = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nhh = np.transpose(master)\nprint(hh)\nimport numpy as np\nimport pandas as pd\n\narr = np.array([1, 2, 3, 4, 5])\narr\narr = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nnn = np.transpose(arr)\nnn\nar = np.array([[9, 8, 0], [7, 3, 5]])\narr = ar.flatten()\nprint(arr)\narr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nrow = arr2d[1, 0:2]\nprint(row)\narr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])\nn = arr[0, :, :]\nprint(n)\nnn = np.array([[1, 2, 3], [4, 5, 6], [8, 9, 10]])\nt = nn[1, 0]\nprint(t)\nimport math\n\n\ndef is_prime(n):\n if n < 2:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\ndef find_prime_length_words(sentence):\n words = sentence.split()\n prime_length_words = []\n for word in words:\n if is_prime(len(word)):\n prime_length_words.append(word)\n return prime_length_words\n\n\n# Example usage\ninput_sentence = \"The quick brown fox jumps over the lazy dog.\"\noutput_words = find_prime_length_words(input_sentence)\nprint(\"Output:\", \" \".join(output_words))\nimport math\n\n\ndef is_prime(n):\n if n < 2:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\ndef find_prime_length_words(sentence):\n words = sentence.split()\n for word in words:\n if is_prime(len(word)):\n prime_length_words.append(words)\n return prime_length_words\n\n\n# Example usage\ninput_sentence = \"The quick brown fox jumps over the lazy dog\"\noutput_words = find_prime_length_words(input_sentence)\nprint(\"output:\", \"\".join(output_words))\n\n\ndef print_poem(poem):\n lines = poem.split(\"\\n\")\n for line in lines:\n line = line.strip()\n if line:\n print(\"\\t\" + line)\n print(\"\\t\\t\" + \"\\t\".join(line.split()))\n\n\n# Example usage\ninput_poem = \"\"\"Twinkle, twinkle, little star,\nHow I wonder what you are!\nUp above the world so high,\nLike a diamond in the sky.\nTwinkle, twinkle, little star,\nHow I wonder what you are!\"\"\"\nprint_poem(input_poem)\npoem = \"Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. Twinkle, twinkle, little star, How I wonder what you are!\"\n\n\ndef poem_print(poem):\n lines = poem.split(\"\\n\")\n for line in lines:\n line = line.strip()\n if line:\n print(\"\\t\" + line)\n print(\"\\t\\t\" + \"\\t\".join(line.strip()))\n\n\ninput_poem = \"Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. 
Twinkle, twinkle, little star, How I wonder what you are!\"\npoem_print(input_poem)\n\n\ndef send_email_greeting(first_name, last_name):\n greeting = \"Hello \" + last_name + \" \" + first_name\n return greeting\n\n\n# Example usage\nfirst_name = \"Dany\"\nlast_name = \"Boon\"\nemail_greeting = send_email_greeting(first_name, last_name)\nprint(email_greeting)\n\n\ndef master(first, last):\n m = \"Hello \" + last + \" \" + first\n return m\n\n\nfirst = \"Nazar\"\nlast = \"Mohammed\"\nemail = master(first, last)\nprint(email)\nr = 6\na = 4 / 3 * 3.142 * r**3\na\n\n\ndef volume(r):\n a = 4 / 3 * 3.1415926535897931 * r**3\n return a\n\n\nvolume(6)\nimport math\n\n\ndef calculate_sphere_volume(radius):\n volume = (4 / 3) * math.pi * radius**3\n return volume\n\n\nradius = 6.0\nsphere_volume = calculate_sphere_volume(radius)\nprint(sphere_volume)\nimport math\n\n\ndef calc(r):\n volume = (4 / 3) * math.pi * radius**3\n return volume\n\n\nr = 6\nsphere = calc(r)\nprint(sphere)\nimport math\n\n\ndef nazar(r):\n v = (4 / 3) * math.pi * r**3\n return v\n\n\ntt = 6.0\nv = nazar(tt)\nprint(v)\n\n\ndef pp(x, y):\n a = (x + y) * (x + y)\n return a\n\n\nx = int(input())\ny = int(input())\nnn = pp(x, y)\nprint(nn)\n\n\ndef cac(a, b, c):\n nn = a + b + c\n return nn\n\n\nmm = cac(1, 2, -8)\nprint(mm)\n\n\ndef cc(lst):\n total = sum(lst)\n return total\n\n\nlst = [1, 2, -8]\nresult = cc(lst)\nprint(result)\n\n\ndef multi(lst):\n result = 1\n for num in lst:\n result *= num\n return result\n\n\ninput_list = [1, 2, -8]\nresult = multi(input_list)\nprint(result)\nn = [50]\nfor i in n:\n if i % 2 == 0:\n print(\"It is a positive number\")\n else:\n print(\"It is a negative number\")\n\n\ndef positive(n=50):\n for i in n:\n if i % 2 == 0:\n print(\"It is a positive number\")\n else:\n print(\"It is a negative number\")\n\n\npositive(n)\nnum = float(input(\"Input a number: \"))\nif num > 0:\n print(\"It is positive number\")\nelif num == 0:\n print(\"It is Zero\")\nelse:\n print(\"It is a negative number\")\nnumber = float(input(\"Enter a number: \"))\nif number > 0:\n print(\"It is a positive number\")\nelif number < 0:\n print(\"It is a negative number\")\nelse:\n print(\"It is zero\")\nnum = float(input(\"Enter a num is:\"))\nif num > 0:\n print(\"It is a positive number\")\nelif num < 0:\n print(\"It is a negative number\")\nelse:\n print(\"It is zero\")\nnumber = [50]\nfor i in number:\n if i % 2 == 0:\n print(\"positive\")\n else:\n print(\"negative\")\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\na = [1, 2, 3, 4, 5, 8, 3, 9, 10]\nb = [2, 8, 5, 4, 3, 1, 0, 9, 4]\nplt.figure(figsize=(5, 3))\nplt.plot(a, b, color=\"red\")\nplt.xlabel(\"x-axis\")\nplt.ylabel(\"y-axis\")\nplt.title(\"Plotting the Graph\")\nplt.show()\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv(\"/kaggle/input/netflix-titles/netflix_titles.csv\")\ndf.head()\ndf.isnull().sum()\ndf1 = df.fillna(value=\"unknown\")\ndf1.isnull().sum()\ndf1.columns\n# Print the column names with spaces visible\nprint([col.strip() for col in df1.columns])\n# Reset the index\ndf1.reset_index(drop=True, inplace=True)\n# Print the column names\nprint(df1.columns)\n# Count the number of movies and TV shows\nmovie_count = df1[df1[\"type\"] == \"Movie\"].shape[0]\ntv_show_count = df1[df1[\"type\"] == \"TV Show\"].shape[0]\n# Create a bar chart\nlabels = [\"Movies\", \"TV Shows\"]\ncounts = [movie_count, tv_show_count]\nplt.bar(labels, 
counts)\nplt.xlabel(\"Content Type\")\nplt.ylabel(\"Count\")\nplt.title(\"Number of Movies vs TV Shows on Netflix\")\nplt.show()\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Count the occurrence of each genre\ngenre_counts = df1[\"listed_in\"].value_counts().head(10)\n# Create a bar chart\nplt.bar(genre_counts.index, genre_counts.values)\nplt.xlabel(\"Genre\")\nplt.ylabel(\"Count\")\nplt.title(\"Top 10 Most Popular Genres on Netflix\")\nplt.xticks(rotation=45, ha=\"right\") # Rotate x-axis labels for better readability\nplt.tight_layout() # Adjust spacing\nplt.show()\n# Create a box plot\nplt.boxplot(df1[\"rating\"])\nplt.ylabel(\"Rating\")\nplt.title(\"Distribution of Ratings on Netflix\")\nplt.show()\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = sns.load_dataset(\"titanic\")\ndf.head()\ndf.isnull().sum()\ndf.mean()\ndf1 = df.fillna(df.mean())\ndf1.isnull().sum()\ndf1[\"sex\"].unique()\n# Convert the data type of the Age column to numeric\ndf1[\"age\"] = pd.to_numeric(df1[\"age\"], errors=\"coerce\")\n# Verify the updated data type of the Age column\nprint(\"Data type of the Age column:\", df1[\"age\"].dtype)\n# Create a box plot to visualize the distribution of age\nsns.boxplot(x=\"age\", data=df1)\nplt.title(\"Distribution of Age\")\nplt.show()\n# Create a scatter plot to visualize the relationship between age and survival\nsns.scatterplot(x=\"age\", y=\"survived\", hue=\"sex\", data=df1)\nplt.title(\"Age vs. Survival\")\nplt.show()\n# Create a bar chart to show the count of passengers by gender\nsns.countplot(x=\"sex\", data=df1)\nplt.title(\"Passenger Count by Gender\")\nplt.show()\n# Create a count plot to show the count of survivors by gender\nsns.countplot(x=\"survived\", hue=\"sex\", data=df1)\nplt.title(\"Survivor Count by Gender\")\nplt.show()\n# Create a heatmap to visualize the relationship between age, gender, and survival\nage_gender_survival = df1.pivot_table(index=\"age\", columns=\"sex\", values=\"survived\")\nsns.heatmap(age_gender_survival, cmap=\"coolwarm\", annot=True, fmt=\".0%\", cbar=True)\nplt.title(\"Survival Rate by Age and Gender\")\nplt.show()\ndf = sns.load_dataset(\"iris\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Load the Iris dataset\ndf = sns.load_dataset(\"iris\")\n# Scatterplot: Sepal Length vs. Sepal Width\nsns.scatterplot(x=\"sepal_length\", y=\"sepal_width\", hue=\"species\", data=df)\nplt.title(\"Scatterplot: Sepal Length vs. 
Sepal Width\")\nplt.show()\n# Boxplot: Petal Length by Species\nsns.boxplot(x=\"species\", y=\"petal_length\", data=df)\nplt.title(\"Boxplot: Petal Length by Species\")\nplt.show()\n# Violinplot: Petal Width by Species\nsns.violinplot(x=\"species\", y=\"petal_width\", data=df)\nplt.title(\"Violinplot: Petal Width by Species\")\nplt.show()\n# Pairplot: Pairwise Relationships and Distributions\nsns.pairplot(df, hue=\"species\")\nplt.title(\"Pairplot: Pairwise Relationships and Distributions\")\nplt.show()\n# Heatmap: Correlation Matrix\ncorrelation = df.corr()\nsns.heatmap(correlation, annot=True, cmap=\"coolwarm\")\nplt.title(\"Correlation Heatmap\")\nplt.show()\n"},"df_info":{"kind":"string","value":""},"has_data_info":{"kind":"bool","value":false,"string":"false"},"nb_filenames":{"kind":"number","value":0,"string":"0"},"retreived_data_description":{"kind":"string","value":""},"script_nb_tokens":{"kind":"number","value":4887,"string":"4,887"},"upvotes":{"kind":"number","value":0,"string":"0"},"tokens_description":{"kind":"number","value":4887,"string":"4,887"},"tokens_script":{"kind":"number","value":4887,"string":"4,887"}}},{"rowIdx":1160096,"cells":{"file_id":{"kind":"string","value":"129326444"},"content":{"kind":"string","value":"1000_companies_profit\nThe dataset includes sample data of 1000 startup companies operating cost and their profit. Well-formatted dataset for building ML regression pipelines.\n**Includes**\nR&D Spend float64\nAdministration float64\nMarketing Spend float64\nState object\nProfit float64\nKaggle dataset identifier: 1000-companies-profit\nimport pandas as pd\n\ndf = pd.read_csv('1000-companies-profit/1000_Companies.csv')\ndf.info()\n\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 R&D Spend 1000 non-null float64\n 1 Administration 1000 non-null float64\n 2 Marketing Spend 1000 non-null float64\n 3 State 1000 non-null object \n 4 Profit 1000 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 39.2+ KB\nExamples:\n{\n \"R&D Spend\": 165349.2,\n \"Administration\": 136897.8,\n \"Marketing Spend\": 471784.1,\n \"State\": \"New York\",\n \"Profit\": 192261.83\n}\n{\n \"R&D Spend\": 162597.7,\n \"Administration\": 151377.59,\n \"Marketing Spend\": 443898.53,\n \"State\": \"California\",\n \"Profit\": 191792.06\n}\n{\n \"R&D Spend\": 153441.51,\n \"Administration\": 101145.55,\n \"Marketing Spend\": 407934.54,\n \"State\": \"Florida\",\n \"Profit\": 191050.39\n}\n{\n \"R&D Spend\": 144372.41,\n \"Administration\": 118671.85,\n \"Marketing Spend\": 383199.62,\n \"State\": \"New York\",\n \"Profit\": 182901.99\n}\n# # Predicting Profit using Multiple Linear Regression Model based on R&D Spend, Administration, and Marketing Spend\n# The model I have created uses Linear Regression to predict the profit of a company based on its investment in Research and Development (R&D), Administration, and Marketing Spend. The dataset used to train the model contains information on these three variables and the corresponding profits earned by various companies.\n# By analyzing the data, the model has learned to identify the relationships between the input variables and the target variable (profit), and can use this knowledge to make predictions on new data. 
The model can be used to help businesses make informed decisions about their investments by providing a reliable estimate of the expected\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn\n\n# ## Loading Data\ndf = pd.read_csv(\"/kaggle/input/1000-companies-profit/1000_Companies.csv\")\ndf.shape\ndf.sample(10)\ndf.isnull().sum()\ndf.corr()\nplt.scatter(df[\"R&D Spend\"], df[\"Profit\"])\nplt.xlabel(\"R&D Spend\")\nplt.ylabel(\"Profit\")\nplt.scatter(df[\"Administration\"], df[\"Profit\"])\nplt.xlabel(\"Administration\")\nplt.ylabel(\"Profit\")\nplt.scatter(df[\"Marketing Spend\"], df[\"Profit\"])\nplt.xlabel(\"Marketing Spend\")\nplt.ylabel(\"Profit\")\n\n# ## Spliting Dataset\nfrom sklearn.model_selection import train_test_split\n\nX, y = df[[\"R&D Spend\", \"Administration\", \"Marketing Spend\"]], df[\"Profit\"]\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.1, random_state=42\n)\n\n# ## Training Dataset using Linear Regression\nfrom sklearn.linear_model import LinearRegression\n\nclf = LinearRegression()\nclf.fit(X_train, y_train)\n\n# ## Predicting Dataset\nclf.predict([[78013.11, 121597.5500, 264346.0600]])\nclf.predict(X_test)\n\n\ndef start():\n R_D = int(input(\"Enter Amout Spend in Research and development:\"))\n Admin = int(input(\"Enter Administration expenses:\"))\n Mar = int(input(\"Enter Marketing Spend Amount\"))\n print(\"Estimated Profit:\", clf.predict([[R_D, Admin, Mar]]))\n start()\n"},"local_path":{"kind":"string","value":"/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326444.ipynb"},"kaggle_dataset_name":{"kind":"string","value":"1000-companies-profit"},"kaggle_dataset_owner":{"kind":"string","value":"rupakroy"},"kversion":{"kind":"string","value":"[{\"Id\": 129326444, \"ScriptId\": 38370784, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9, \"AuthorUserId\": 14110262, \"CreationDate\": \"05/12/2023 19:59:51\", \"VersionNumber\": 2.0, \"Title\": \"Linear Regression Multiple Variables\", \"EvaluationDate\": \"05/12/2023\", \"IsChange\": true, \"TotalLines\": 70.0, \"LinesInsertedFromPrevious\": 9.0, \"LinesChangedFromPrevious\": 0.0, \"LinesUnchangedFromPrevious\": 61.0, \"LinesInsertedFromFork\": NaN, \"LinesDeletedFromFork\": NaN, \"LinesChangedFromFork\": NaN, \"LinesUnchangedFromFork\": NaN, \"TotalVotes\": 0}]"},"kversion_datasetsources":{"kind":"string","value":"[{\"Id\": 185269128, \"KernelVersionId\": 129326444, \"SourceDatasetVersionId\": 3105372}]"},"dataset_versions":{"kind":"string","value":"[{\"Id\": 3105372, \"DatasetId\": 1896237, \"DatasourceVersionId\": 3154274, \"CreatorUserId\": 3072182, \"LicenseName\": \"CC0: Public Domain\", \"CreationDate\": \"01/28/2022 10:49:42\", \"VersionNumber\": 1.0, \"Title\": \"1000_companies_profit\", \"Slug\": \"1000-companies-profit\", \"Subtitle\": \"1000 Companies operating cost sample data list for building regression usecases\", \"Description\": \"The dataset includes sample data of 1000 startup companies operating cost and their profit. 
Well-formatted dataset for building ML regression pipelines.\\n**Includes**\\nR&D Spend float64\\nAdministration float64\\nMarketing Spend float64\\nState object\\nProfit float64\", \"VersionNotes\": \"Initial release\", \"TotalCompressedBytes\": 0.0, \"TotalUncompressedBytes\": 0.0}]"},"datasets":{"kind":"string","value":"[{\"Id\": 1896237, \"CreatorUserId\": 3072182, \"OwnerUserId\": 3072182.0, \"OwnerOrganizationId\": NaN, \"CurrentDatasetVersionId\": 3105372.0, \"CurrentDatasourceVersionId\": 3154274.0, \"ForumId\": 1919554, \"Type\": 2, \"CreationDate\": \"01/28/2022 10:49:42\", \"LastActivityDate\": \"01/28/2022\", \"TotalViews\": 3171, \"TotalDownloads\": 826, \"TotalVotes\": 10, \"TotalKernels\": 10}]"},"users":{"kind":"string","value":"[{\"Id\": 3072182, \"UserName\": \"rupakroy\", \"DisplayName\": \"Rupak Roy/ Bob\", \"RegisterDate\": \"04/11/2019\", \"PerformanceTier\": 2}]"},"script":{"kind":"string","value":"# # Predicting Profit using Multiple Linear Regression Model based on R&D Spend, Administration, and Marketing Spend\n# The model I have created uses Linear Regression to predict the profit of a company based on its investment in Research and Development (R&D), Administration, and Marketing Spend. The dataset used to train the model contains information on these three variables and the corresponding profits earned by various companies.\n# By analyzing the data, the model has learned to identify the relationships between the input variables and the target variable (profit), and can use this knowledge to make predictions on new data. The model can be used to help businesses make informed decisions about their investments by providing a reliable estimate of the expected\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn\n\n# ## Loading Data\ndf = pd.read_csv(\"/kaggle/input/1000-companies-profit/1000_Companies.csv\")\ndf.shape\ndf.sample(10)\ndf.isnull().sum()\ndf.corr()\nplt.scatter(df[\"R&D Spend\"], df[\"Profit\"])\nplt.xlabel(\"R&D Spend\")\nplt.ylabel(\"Profit\")\nplt.scatter(df[\"Administration\"], df[\"Profit\"])\nplt.xlabel(\"Administration\")\nplt.ylabel(\"Profit\")\nplt.scatter(df[\"Marketing Spend\"], df[\"Profit\"])\nplt.xlabel(\"Marketing Spend\")\nplt.ylabel(\"Profit\")\n\n# ## Spliting Dataset\nfrom sklearn.model_selection import train_test_split\n\nX, y = df[[\"R&D Spend\", \"Administration\", \"Marketing Spend\"]], df[\"Profit\"]\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.1, random_state=42\n)\n\n# ## Training Dataset using Linear Regression\nfrom sklearn.linear_model import LinearRegression\n\nclf = LinearRegression()\nclf.fit(X_train, y_train)\n\n# ## Predicting Dataset\nclf.predict([[78013.11, 121597.5500, 264346.0600]])\nclf.predict(X_test)\n\n\ndef start():\n R_D = int(input(\"Enter Amout Spend in Research and development:\"))\n Admin = int(input(\"Enter Administration expenses:\"))\n Mar = int(input(\"Enter Marketing Spend Amount\"))\n print(\"Estimated Profit:\", clf.predict([[R_D, Admin, Mar]]))\n start()\n"},"df_info":{"kind":"string","value":"[{\"1000-companies-profit/1000_Companies.csv\": {\"column_names\": \"[\\\"R&D Spend\\\", \\\"Administration\\\", \\\"Marketing Spend\\\", \\\"State\\\", \\\"Profit\\\"]\", \"column_data_types\": \"{\\\"R&D Spend\\\": \\\"float64\\\", \\\"Administration\\\": \\\"float64\\\", \\\"Marketing Spend\\\": \\\"float64\\\", \\\"State\\\": \\\"object\\\", \\\"Profit\\\": \\\"float64\\\"}\", \"info\": \"\\nRangeIndex: 1000 
entries, 0 to 999\\nData columns (total 5 columns):\\n # Column Non-Null Count Dtype \\n--- ------ -------------- ----- \\n 0 R&D Spend 1000 non-null float64\\n 1 Administration 1000 non-null float64\\n 2 Marketing Spend 1000 non-null float64\\n 3 State 1000 non-null object \\n 4 Profit 1000 non-null float64\\ndtypes: float64(4), object(1)\\nmemory usage: 39.2+ KB\\n\", \"summary\": \"{\\\"R&D Spend\\\": {\\\"count\\\": 1000.0, \\\"mean\\\": 81668.9272, \\\"std\\\": 46537.56789148918, \\\"min\\\": 0.0, \\\"25%\\\": 43084.5, \\\"50%\\\": 79936.0, \\\"75%\\\": 124565.5, \\\"max\\\": 165349.2}, \\\"Administration\\\": {\\\"count\\\": 1000.0, \\\"mean\\\": 122963.8976117, \\\"std\\\": 12613.927534630991, \\\"min\\\": 51283.14, \\\"25%\\\": 116640.68485, \\\"50%\\\": 122421.61215, \\\"75%\\\": 129139.118, \\\"max\\\": 321652.14}, \\\"Marketing Spend\\\": {\\\"count\\\": 1000.0, \\\"mean\\\": 226205.05841882998, \\\"std\\\": 91578.39354210424, \\\"min\\\": 0.0, \\\"25%\\\": 150969.5846, \\\"50%\\\": 224517.88735, \\\"75%\\\": 308189.808525, \\\"max\\\": 471784.1}, \\\"Profit\\\": {\\\"count\\\": 1000.0, \\\"mean\\\": 119546.16465561, \\\"std\\\": 42888.63384847688, \\\"min\\\": 14681.4, \\\"25%\\\": 85943.1985425, \\\"50%\\\": 117641.4663, \\\"75%\\\": 155577.107425, \\\"max\\\": 476485.43}}\", \"examples\": \"{\\\"R&D Spend\\\":{\\\"0\\\":165349.2,\\\"1\\\":162597.7,\\\"2\\\":153441.51,\\\"3\\\":144372.41},\\\"Administration\\\":{\\\"0\\\":136897.8,\\\"1\\\":151377.59,\\\"2\\\":101145.55,\\\"3\\\":118671.85},\\\"Marketing Spend\\\":{\\\"0\\\":471784.1,\\\"1\\\":443898.53,\\\"2\\\":407934.54,\\\"3\\\":383199.62},\\\"State\\\":{\\\"0\\\":\\\"New York\\\",\\\"1\\\":\\\"California\\\",\\\"2\\\":\\\"Florida\\\",\\\"3\\\":\\\"New York\\\"},\\\"Profit\\\":{\\\"0\\\":192261.83,\\\"1\\\":191792.06,\\\"2\\\":191050.39,\\\"3\\\":182901.99}}\"}}]"},"has_data_info":{"kind":"bool","value":true,"string":"true"},"nb_filenames":{"kind":"number","value":1,"string":"1"},"retreived_data_description":{"kind":"string","value":"1000-companies-profit/1000_Companies.csv:\n\n ['R&D Spend', 'Administration', 'Marketing Spend', 'State', 'Profit']\n\n {'R&D Spend': 'float64', 'Administration': 'float64', 'Marketing Spend': 'float64', 'State': 'object', 'Profit': 'float64'}\n\n {'R&D Spend': {'count': 1000.0, 'mean': 81668.9272, 'std': 46537.56789148918, 'min': 0.0, '25%': 43084.5, '50%': 79936.0, '75%': 124565.5, 'max': 165349.2}, 'Administration': {'count': 1000.0, 'mean': 122963.8976117, 'std': 12613.927534630991, 'min': 51283.14, '25%': 116640.68485, '50%': 122421.61215, '75%': 129139.118, 'max': 321652.14}, 'Marketing Spend': {'count': 1000.0, 'mean': 226205.05841882998, 'std': 91578.39354210424, 'min': 0.0, '25%': 150969.5846, '50%': 224517.88735, '75%': 308189.808525, 'max': 471784.1}, 'Profit': {'count': 1000.0, 'mean': 119546.16465561, 'std': 42888.63384847688, 'min': 14681.4, '25%': 85943.1985425, '50%': 117641.4663, '75%': 155577.107425, 'max': 476485.43}}\n\n RangeIndex: 1000 entries, 0 to 999\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 R&D Spend 1000 non-null float64\n 1 Administration 1000 non-null float64\n 2 Marketing Spend 1000 non-null float64\n 3 State 1000 non-null object \n 4 Profit 1000 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 39.2+ KB\n\n\n {'R&D Spend': {'0': 165349.2, '1': 162597.7, '2': 153441.51, '3': 144372.41}, 'Administration': {'0': 136897.8, '1': 151377.59, '2': 101145.55, '3': 118671.85}, 'Marketing Spend': 
{'0': 471784.1, '1': 443898.53, '2': 407934.54, '3': 383199.62}, 'State': {'0': 'New York', '1': 'California', '2': 'Florida', '3': 'New York'}, 'Profit': {'0': 192261.83, '1': 191792.06, '2': 191050.39, '3': 182901.99}}\n\n"},"script_nb_tokens":{"kind":"number","value":602,"string":"602"},"upvotes":{"kind":"number","value":0,"string":"0"},"tokens_description":{"kind":"number","value":1224,"string":"1,224"},"tokens_script":{"kind":"number","value":602,"string":"602"}}},{"rowIdx":1160097,"cells":{"file_id":{"kind":"string","value":"129326675"},"content":{"kind":"string","value":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = pd.DataFrame(\n {\n \"Tanggal Keberangkatan\": [\n \"2023-05-15\",\n \"2023-05-16\",\n \"2023-05-17\",\n \"2023-05-18\",\n \"2023-05-19\",\n ],\n \"Waktu Keberangkatan\": [\"08:00\", \"12:00\", \"14:00\", \"18:00\", \"22:00\"],\n \"Tanggal Kedatangan\": [\n \"2023-05-15\",\n \"2023-05-16\",\n \"2023-05-17\",\n \"2023-05-18\",\n \"2023-05-19\",\n ],\n \"Waktu Kedatangan\": [\"10:00\", \"14:00\", \"16:00\", \"20:00\", \"00:00\"],\n \"Durasi Penerbangan (jam)\": [2, 2, 2, 2, 2],\n }\n)\nplt.hist(data[\"Durasi Penerbangan (jam)\"])\nplt.xlabel(\"Durasi Penerbangan (jam)\")\nplt.ylabel(\"Frekuensi\")\nplt.title(\"Histogram Durasi Penerbangan\")\nplt.show()\nplt.scatter(data[\"Waktu Keberangkatan\"], data[\"Waktu Kedatangan\"])\nplt.xlabel(\"Waktu Keberangkatan\")\nplt.ylabel(\"Waktu Kedatangan\")\nplt.title(\"Scatter Plot Waktu Keberangkatan vs Waktu Kedatangan\")\nplt.show()\nplt.boxplot(data[\"Durasi Penerbangan (jam)\"])\nplt.ylabel(\"Durasi Penerbangan (jam)\")\nplt.title(\"Box Plot Durasi Penerbangan\")\nplt.show()\nplt.bar(data[\"Tanggal Keberangkatan\"], [1, 2, 3, 2, 1])\nplt.xlabel(\"Tanggal Keberangkatan\")\nplt.ylabel(\"Jumlah Penerbangan\")\nplt.title(\"Bar Chart Jumlah Penerbangan Berdasarkan Tanggal Keberangkatan\")\nplt.show()\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = {\n \"tanggal_keberangkatan\": [\n \"2022-01-01\",\n \"2022-01-02\",\n \"2022-01-03\",\n \"2022-01-04\",\n \"2022-01-05\",\n ],\n \"waktu_keberangkatan\": [1200, 1300, 1400, 1500, 1600],\n \"tanggal_kedatangan\": [\n \"2022-01-01\",\n \"2022-01-02\",\n \"2022-01-03\",\n \"2022-01-04\",\n \"2022-01-05\",\n ],\n \"waktu_kedatangan\": [1430, 1530, 1630, 1730, 1830],\n \"durasi_penerbangan\": [2.5, 2.5, 2.5, 2.5, 2.5],\n}\ndf = pd.DataFrame(data)\ndf.set_index(\"tanggal_keberangkatan\", inplace=True)\nfig, ax = plt.subplots()\nax.plot(df.index, df[\"durasi_penerbangan\"], label=\"Durasi Penerbangan\")\nax.set_title(\"Grafik Durasi Penerbangan Maskapai\")\nax.set_xlabel(\"Tanggal Keberangkatan\")\nax.set_ylabel(\"Durasi Penerbangan (jam)\")\nplt.xticks(rotation=45)\nax.legend()\nplt.show()\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = {\n \"tanggal_keberangkatan\": [\n \"2023-05-14\",\n \"2023-05-15\",\n \"2023-05-16\",\n \"2023-05-17\",\n \"2023-05-18\",\n ],\n \"waktu_keberangkatan\": [\"08:00\", \"10:00\", \"12:00\", \"14:00\", \"16:00\"],\n \"tanggal_kedatangan\": [\n \"2023-05-14\",\n \"2023-05-15\",\n \"2023-05-16\",\n \"2023-05-17\",\n \"2023-05-18\",\n ],\n \"waktu_kedatangan\": [\"09:30\", \"11:30\", \"13:30\", \"15:30\", \"17:30\"],\n \"durasi_penerbangan\": [90, 90, 90, 90, 90],\n}\ndf = pd.DataFrame(data)\nfig, ax = plt.subplots()\nax.scatter(df[\"tanggal_keberangkatan\"], df[\"durasi_penerbangan\"])\nax.set_title(\"Durasi Penerbangan Berdasarkan Tanggal Keberangkatan\")\nax.set_xlabel(\"Tanggal Keberangkatan\")\nax.set_ylabel(\"Durasi 
Penerbangan (menit)\")\nplt.show()\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndata = pd.DataFrame(\n {\n \"tanggal_keberangkatan\": [\n \"2021-05-13\",\n \"2021-05-13\",\n \"2021-05-14\",\n \"2021-05-14\",\n \"2021-05-15\",\n \"2021-05-15\",\n ],\n \"waktu_keberangkatan\": [\n \"08:00:00\",\n \"12:00:00\",\n \"08:00:00\",\n \"12:00:00\",\n \"08:00:00\",\n \"12:00:00\",\n ],\n \"tanggal_kedatangan\": [\n \"2021-05-13\",\n \"2021-05-13\",\n \"2021-05-14\",\n \"2021-05-14\",\n \"2021-05-15\",\n \"2021-05-15\",\n ],\n \"waktu_kedatangan\": [\n \"10:30:00\",\n \"14:30:00\",\n \"10:30:00\",\n \"14:30:00\",\n \"10:30:00\",\n \"14:30:00\",\n ],\n \"durasi_penerbangan\": [2.5, 2.5, 2.5, 2.5, 2.5, 2.5],\n }\n)\ndata[\"tanggal_keberangkatan\"] = pd.to_datetime(data[\"tanggal_keberangkatan\"])\ndata[\"tanggal_kedatangan\"] = pd.to_datetime(data[\"tanggal_kedatangan\"])\ndata[\"day_of_week\"] = data[\"tanggal_keberangkatan\"].dt.dayofweek\npivot = pd.pivot_table(\n data,\n values=\"durasi_penerbangan\",\n index=\"day_of_week\",\n columns=\"waktu_keberangkatan\",\n)\nsns.heatmap(pivot, cmap=\"YlGnBu\", annot=True, fmt=\".1f\")\nplt.title(\"Durasi Penerbangan per Hari dalam Seminggu dan Waktu Keberangkatan\")\nplt.xlabel(\"Waktu Keberangkatan\")\nplt.ylabel(\"Hari dalam Seminggu\")\nplt.show()\nimport matplotlib.pyplot as plt\n\ntanggal_keberangkatan = [\n \"2022-01-01\",\n \"2022-01-02\",\n \"2022-01-03\",\n \"2022-01-04\",\n \"2022-01-05\",\n]\njumlah_penerbangan = [80, 70, 90, 85, 75]\nfig, ax = plt.subplots()\nax.bar(tanggal_keberangkatan, jumlah_penerbangan)\nax.set_xlabel(\"Tanggal Keberangkatan\")\nax.set_ylabel(\"Jumlah Penerbangan\")\nax.set_title(\"Grafik Jumlah Penerbangan Maskapai XYZ\")\nplt.show()\nimport folium\n\ndata_penerbangan = [\n {\"keberangkatan\": \"Jakarta\", \"tujuan\": \"Surabaya\"},\n {\"keberangkatan\": \"Jakarta\", \"tujuan\": \"Bali\"},\n {\"keberangkatan\": \"Surabaya\", \"tujuan\": \"Bali\"},\n {\"keberangkatan\": \"Surabaya\", \"tujuan\": \"Jakarta\"},\n {\"keberangkatan\": \"Bali\", \"tujuan\": \"Jakarta\"},\n {\"keberangkatan\": \"Bali\", \"tujuan\": \"Surabaya\"},\n]\npeta = folium.Map(location=[-2.548926, 118.014863], zoom_start=5)\nfor p in data_penerbangan:\n folium.Marker(\n location=[-6.1753924, 106.8271528],\n popup=p[\"keberangkatan\"],\n icon=folium.Icon(color=\"red\"),\n ).add_to(peta)\nfor p in data_penerbangan:\n folium.Marker(\n location=[-8.4095181, 115.188916],\n popup=p[\"tujuan\"],\n icon=folium.Icon(color=\"green\"),\n ).add_to(peta)\npeta\n"},"local_path":{"kind":"string","value":"/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326675.ipynb"},"kaggle_dataset_name":{"kind":"null"},"kaggle_dataset_owner":{"kind":"null"},"kversion":{"kind":"string","value":"[{\"Id\": 129326675, \"ScriptId\": 38449348, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9, \"AuthorUserId\": 14545408, \"CreationDate\": \"05/12/2023 20:03:15\", \"VersionNumber\": 1.0, \"Title\": \"notebook8ef928e75b\", \"EvaluationDate\": \"05/12/2023\", \"IsChange\": true, \"TotalLines\": 147.0, \"LinesInsertedFromPrevious\": 147.0, \"LinesChangedFromPrevious\": 0.0, \"LinesUnchangedFromPrevious\": 0.0, \"LinesInsertedFromFork\": NaN, \"LinesDeletedFromFork\": NaN, \"LinesChangedFromFork\": NaN, \"LinesUnchangedFromFork\": NaN, \"TotalVotes\": 0}]"},"kversion_datasetsources":{"kind":"null"},"dataset_versions":{"kind":"null"},"datasets":{"kind":"null"},"users":{"kind":"null"},"script":{"kind":"string","value":"import matplotlib.pyplot as 
The embedded viewer state ends with three more preview rows and the paging details for this slice of the data: viewer page 11,600 at 100 rows per page, i.e. rows starting at offset 1,160,000 out of 1,160,428 rows in total.

- A notebook (file_id not shown here) that builds small pandas DataFrames of flight schedules (departure and arrival dates, times, and durations) and charts them as a histogram, scatter plot, box plot, bar chart, line plot, and a seaborn heatmap of duration by weekday and departure time, ending with a folium map of Jakarta, Surabaya, and Bali routes; 2,540 script tokens, 0 upvotes, no linked Kaggle dataset.
- file_id 129326643 ("notebook3068a9f5db", 81 lines, created 2023-05-12): standalone datetime snippets that compute flight durations with datetime.strptime, split date and time strings into their components, and convert a timestamp from Asia/Jakarta to US/Pacific with pytz; 1,080 script tokens, 0 upvotes, no linked Kaggle dataset.
- file_id 129326661 ("notebook1305764811", 92 lines, created 2023-05-12): matplotlib and seaborn snippets over toy price, sales volume, profit margin, and customer rating data, covering a box plot, scatter plots, an annotated heatmap, a line plot, and a correlation heatmap; 1,177 script tokens, 0 upvotes, no linked Kaggle dataset.
The viewer reports the following columns and value ranges (string ranges are cell lengths):

| Column | Type | Range |
| --- | --- | --- |
| file_id | string | length 5–9 |
| content | string | length 100–5.25M |
| local_path | string | length 66–70 |
| kaggle_dataset_name | string | length 3–50 |
| kaggle_dataset_owner | string | length 3–20 |
| kversion | string | length 497–763 |
| kversion_datasetsources | string | length 71–5.46k |
| dataset_versions | string | length 338–235k |
| datasets | string | length 334–371 |
| users | string | length 111–264 |
| script | string | length 100–5.25M |
| df_info | string | length 0–4.87M |
| has_data_info | bool | 2 classes |
| nb_filenames | int64 | 0–370 |
| retreived_data_description | string | length 0–4.44M |
| script_nb_tokens | int64 | 25–663k |
| upvotes | int64 | 0–1.65k |
| tokens_description | int64 | 25–663k |
| tokens_script | int64 | 25–663k |
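A minimal sketch of checking this schema against the hosted data with the `datasets` library. The repository id is the one embedded in the page state above; the `train` split name and open (non-gated) access are assumptions.

```python
from datasets import load_dataset

# Stream so the multi-megabyte `content`/`script` cells are not all downloaded up front.
ds = load_dataset(
    "loubnabnl/kaggle_scripts_new_format_subset",  # repository id from the page state
    split="train",                                 # assumption: split name is not shown here
    streaming=True,
)

row = next(iter(ds))
print(sorted(row.keys()))                          # should match the columns listed above
print(row["file_id"], row["upvotes"], row["script_nb_tokens"])
print(row["script"][:300])                         # start of the Kaggle notebook source
```

Streaming keeps this first-row check cheap even though the content and script cells can reach several megabytes.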
--- Preview row: file_id 129219106 ---
content (Kaggle notebook source):
# # Load libraries import os import numpy as np import pandas as pd from glob import glob import torch import torchvision from torchvision import transforms from torchvision.datasets import ImageFolder from torch.utils.data import DataLoader, random_split from PIL import Image, ImageFile import matplotlib.pyplot as plt from datetime import datetime import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import zipfile # Avoid error with images ImageFile.LOAD_TRUNCATED_IMAGES = True use_cuda = torch.cuda.is_available() use_cuda # # Load data # !pip install gdown # !gdown https://drive.google.com/uc?id=1FxvmwTrYZsMyCMfH_mcrLJ2oStXABQG_ # with zipfile.ZipFile("/kaggle/working/dataset.zip", 'r') as zip_ref: # zip_ref.extractall("/kaggle/working/") # !mkdir -p /kaggle/working/chest_xray/total/NORMAL # !mkdir -p /kaggle/working/chest_xray/total/PNEUMONIA # !rsync -a /kaggle/working/chest_xray/train/NORMAL/ /kaggle/working/chest_xray/total/NORMAL/ # !rsync -a /kaggle/working/chest_xray/train/PNEUMONIA/ /kaggle/working/chest_xray/total/PNEUMONIA/ # !rsync -a /kaggle/working/chest_xray/test/NORMAL/ /kaggle/working/chest_xray/total/NORMAL/ # !rsync -a /kaggle/working/chest_xray/test/PNEUMONIA/ /kaggle/working/chest_xray/total/PNEUMONIA/ # # Combine data and train val test split # !pip install python_splitter # import python_splitter # python_splitter.split_from_folder("/kaggle/working/chest_xray/total", train=0.7, test=0.15, val=0.15) # # Data loader train_dir = "/kaggle/working/Train_Test_Folder/train" val_dir = "/kaggle/working/Train_Test_Folder/val" test_dir = "/kaggle/working/Train_Test_Folder/test" img_size = [400, 500] batch_size = 8 transform = transforms.Compose( [ transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ] ) train_dataset = ImageFolder(train_dir, transform=transform) val_dataset = ImageFolder(val_dir, transform=transform) test_dataset = ImageFolder(test_dir, transform=transform) # Define data loaders for each set train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True) classes = train_dataset.classes print(f"Nuber of classes : {len(classes)}") # # Sample images # functions to show an image def imshow(img): npimg = np.array(img.permute(1, 2, 0)) print(npimg.shape) plt.imshow(npimg) plt.show() # get some random training images dataiter = iter(train_loader) images, labels = next(dataiter) # show images imshow( torchvision.utils.make_grid( images, ) ) # # Load Model device = torch.device("cuda") net = torch.hub.load( "NVIDIA/DeepLearningExamples:torchhub", "nvidia_efficientnet_b0", pretrained=True ) # # Create new top layers class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(1280, 512) self.fc2 = nn.Linear(512, 2) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return x subnet = Net() print(net) # # Combine pretrained model with new top layers net.classifier.fc = subnet # # Freeze all layers except top layers for par in net.parameters(): par.requires_grad = False for par in net.classifier.fc.parameters(): par.requires_grad = True # # Loss and optimizer criterion = nn.CrossEntropyLoss().to(device) optimizer = optim.Adam(net.parameters()) # Move model on GPU device = torch.device("cuda") net = net.to(device) # # Train the model def train_one_epoch(epoch_index): running_loss = 0.0 last_loss = 
0.0 for i, data in enumerate(train_loader): # Every data instance is an input + label pair inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) # Zero your gradients for every batch! optimizer.zero_grad() # Make predictions for this batch outputs = net(inputs) # Compute the loss and its gradients loss = criterion(outputs, labels) loss.backward() # Adjust learning weights optimizer.step() # Gather data and report running_loss += loss.item() if i % 100 == 99: last_loss = running_loss / 100 # loss per batch print(" batch {} loss: {}".format(i + 1, last_loss)) running_loss = 0.0 return last_loss timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") epoch_number = 0 best_vloss = 1_000_000.0 EPOCHS = 5 for epoch in range(EPOCHS): print("EPOCH {}:".format(epoch_number + 1)) # Make sure gradient tracking is on, and do a pass over the data net.train(True) avg_loss = train_one_epoch(epoch_number) # We don't need gradients on to do reporting net.train(False) running_vloss = 0.0 for i, vdata in enumerate(val_loader): vinputs, vlabels = vdata vinputs, vlabels = vinputs.to(device), vlabels.to(device) voutputs = net(vinputs) vloss = criterion(voutputs, vlabels) running_vloss += vloss.item() avg_vloss = running_vloss / (i + 1) print(" LOSS train {} valid {}".format(avg_loss, avg_vloss)) net.eval() with torch.no_grad(): correct = 0 total = 0 for images, labels in val_loader: images = images.to(device) labels = labels.to(device) outputs = net(images).to(device) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print( " Accuracy of the model on the Validation images: {} %".format( 100 * correct / total ) ) # Track best performance, and save the model's state if avg_vloss < best_vloss: best_vloss = avg_vloss model_path = "model_{}".format(timestamp) torch.save(net.state_dict(), model_path) epoch_number += 1 print("-" * 20) print("Finished Training") # Load best model net.load_state_dict(torch.load("/kaggle/working/model_20230511_232619")) # # Evaluate the model on test net.eval() with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images.to(device) labels = labels.to(device) outputs = net(images).to(device) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print( "Accuracy of the model on the test images: {} %".format(100 * correct / total) ) # # Unfreeze all layers and Fine tune the model optimizer = optim.Adam(net.parameters(), lr=0.00001) for par in net.parameters(): par.requires_grad = True epoch_number = 0 EPOCHS = 5 for epoch in range(EPOCHS): print("EPOCH {}:".format(epoch_number + 1)) # Make sure gradient tracking is on, and do a pass over the data net.train(True) avg_loss = train_one_epoch(epoch_number) # We don't need gradients on to do reporting net.train(False) running_vloss = 0.0 for i, vdata in enumerate(val_loader): vinputs, vlabels = vdata vinputs, vlabels = vinputs.to(device), vlabels.to(device) voutputs = net(vinputs) vloss = criterion(voutputs, vlabels) running_vloss += vloss.item() avg_vloss = running_vloss / (i + 1) print(" LOSS train {} valid {}".format(avg_loss, avg_vloss)) net.eval() with torch.no_grad(): correct = 0 total = 0 for images, labels in val_loader: images = images.to(device) labels = labels.to(device) outputs = net(images).to(device) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print( " Accuracy of the model on the Validation 
images: {} %".format( 100 * correct / total ) ) # Track best performance, and save the model's state if avg_vloss < best_vloss: best_vloss = avg_vloss model_path = "model_fine_tuned" torch.save(net.state_dict(), model_path) epoch_number += 1 print("-" * 20) print("Finished Training") # Load best model net.load_state_dict(torch.load("/kaggle/working/model_fine_tuned")) # # Evaluate the model on test net.eval() with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images.to(device) labels = labels.to(device) outputs = net(images).to(device) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print( "Accuracy of the model on the test images: {} %".format(100 * correct / total) ) # # Save the model torch.save(net.state_dict(), "Final_model")
local_path: /fsx/loubna/kaggle_data/kaggle-code-data/data/0129/219/129219106.ipynb
kaggle_dataset_name: null
kaggle_dataset_owner: null
kversion:
[{"Id": 129219106, "ScriptId": 38415330, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10895837, "CreationDate": "05/12/2023 00:16:41", "VersionNumber": 2.0, "Title": "X-rays classification using torch", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 348.0, "LinesInsertedFromPrevious": 147.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 201.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
kversion_datasetsources: null
dataset_versions: null
datasets: null
users: null
script: identical to the content cell above
has_data_info: false
nb_filenames: 0
script_nb_tokens: 2,757
upvotes: 1
tokens_description: 2,757
tokens_script: 2,757
--- Preview row: file_id 129219370 ---
content (dataset description and Kaggle notebook source):
<jupyter_start><jupyter_text>Starbucks Nutrition Facts ``` Nutrition facts for several Starbucks food items ``` | Column | Description | | ------- | ------------------------------------------------------------ | | item | The name of the food item. | | calories| The amount of calories in the food item. | | fat | The quantity of fat in grams present in the food item. | | carb | The amount of carbohydrates in grams found in the food item. | | fiber | The quantity of dietary fiber in grams in the food item. | | protein | The amount of protein in grams contained in the food item. | | type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). | Kaggle dataset identifier: starbucks-nutrition <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv") # # Taking general look at the dataset df.head() df.shape df = df.iloc[:, 1:] df.isna().sum() def plots(df, _x, _y): fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) sns.histplot(data=df, x=_x, kde=True, color="r", ax=axes[0]) sns.histplot(data=df, x=_x, kde=True, hue=_y, color="r", ax=axes[1]) plt.show() # # Counting amount of categories palette = sns.color_palette("summer") plt.pie( df["type"].value_counts(), labels=df["type"].value_counts().index, autopct="%0.2f%%", colors=palette, shadow=True, ) # # Overall look using Pairplot sns.pairplot(df, vars=["calories", "fat", "carb", "fiber", "protein"], hue="type") # # Thorough look at histplots of numerical features for i in ["calories", "fat", "carb", "fiber", "protein"]: plots(df, i, "type") # # Correlation of numerical features # All values have very good correlation coefficient between each other, except fiber # Calories depend on carb, fat and protein numerical_cols = ["calories", "fat", "carb", "fiber", "protein"] nums = df[numerical_cols].copy() corr = nums.corr() sns.heatmap(corr, annot=True)
local_path: /fsx/loubna/kaggle_data/kaggle-code-data/data/0129/219/129219370.ipynb
kaggle_dataset_name: starbucks-nutrition
kaggle_dataset_owner: utkarshx27
kversion, kversion_datasetsources, dataset_versions, datasets, users (JSON metadata, in that order):
[{"Id": 129219370, "ScriptId": 38404168, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11036701, "CreationDate": "05/12/2023 00:21:15", "VersionNumber": 1.0, "Title": "Starbucks Nutrition Facts EDA", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 52.0, "LinesInsertedFromPrevious": 52.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185071293, "KernelVersionId": 129219370, "SourceDatasetVersionId": 5651811}]
[{"Id": 5651811, "DatasetId": 3248696, "DatasourceVersionId": 5727183, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:42:59", "VersionNumber": 1.0, "Title": "Starbucks Nutrition Facts", "Slug": "starbucks-nutrition", "Subtitle": "Nutrition facts for several Starbucks food items", "Description": "```\nNutrition facts for several Starbucks food items\n```\n| Column | Description |\n| ------- | ------------------------------------------------------------ |\n| item | The name of the food item. |\n| calories| The amount of calories in the food item. |\n| fat | The quantity of fat in grams present in the food item. |\n| carb | The amount of carbohydrates in grams found in the food item. |\n| fiber | The quantity of dietary fiber in grams in the food item. |\n| protein | The amount of protein in grams contained in the food item. |\n| type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3248696, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651811.0, "CurrentDatasourceVersionId": 5727183.0, "ForumId": 3314049, "Type": 2, "CreationDate": "05/10/2023 05:42:59", "LastActivityDate": "05/10/2023", "TotalViews": 12557, "TotalDownloads": 2321, "TotalVotes": 59, "TotalKernels": 17}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
script: the notebook code from the content cell above, repeated without the dataset description
has_data_info: false
nb_filenames: 1
script_nb_tokens: 462
upvotes: 2
tokens_description: 678
tokens_script: 462
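The kversion, kversion_datasetsources, dataset_versions, datasets, and users cells are JSON-encoded strings (they are typed as plain strings in the schema table above), not nested features. Below is a small decoding sketch; it assumes `row` is a record loaded as in the earlier example, and it guards against empty cells since this page does not confirm whether they arrive as None or as empty strings.

```python
import json

def decode_cell(value):
    """Parse a JSON-encoded metadata cell, tolerating empty or missing cells."""
    if not value:
        return None
    # The cells hold JSON arrays of dicts; NaN literals, as seen above, are accepted by json.loads.
    return json.loads(value)

kversion = decode_cell(row["kversion"])
if kversion:
    print(kversion[0]["Title"], kversion[0]["TotalVotes"])

linked_dataset = decode_cell(row["datasets"])
if linked_dataset:
    print(linked_dataset[0]["TotalViews"], linked_dataset[0]["TotalDownloads"])
```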
--- Preview row: file_id 129219940 ---
content (Kaggle notebook source):
# # Analyse the LEGO dataset # # Introduction # Today we'll dive deep into a dataset all about LEGO. From the dataset we can ask whole bunch of interesting questions about the history of the LEGO company, their product offering, and which LEGO set ultimately rules them all: # What is the most enormous LEGO set ever created and how many parts did it have? # How did the LEGO company start out? In which year were the first LEGO sets released and how many sets did the company sell when it first launched? # Which LEGO theme has the most sets? Is it one of LEGO's own themes like Ninjago or a theme they licensed liked Harry Potter or Marvel Superheroes? # When did the LEGO company really expand its product offering? Can we spot a change in the company strategy based on how many themes and sets did it released year-on-year? # Did LEGO sets grow in size and complexity over time? Do older LEGO # sets tend to have more or fewer parts than newer sets? # **Data Source** # [Rebrickable](https://rebrickable.com/downloads/) has compiled data on all the LEGO pieces in existence. # # Import Statements from google.colab import drive drive.mount("/content/drive") import pandas as pd import matplotlib.pyplot as plt colors = pd.read_csv( "/content/drive/MyDrive/Colab Notebooks (1)/LEGO Notebook and Data (start)/data/colors.csv" ) sets = pd.read_csv( "/content/drive/MyDrive/Colab Notebooks (1)/LEGO Notebook and Data (start)/data/sets.csv" ) themes = pd.read_csv( "/content/drive/MyDrive/Colab Notebooks (1)/LEGO Notebook and Data (start)/data/themes.csv" ) print(colors.head()) print(colors.shape) print(sets.head()) print(sets.shape) print(themes.head()) print(themes.shape) # # Data Exploration # First, we will explore how many different colours does the LEGO company produce. We will Read the colors.csv file in the data folder and find the total number of unique colours. colors["name"].nunique() # There are 135 uniques colors. # ## Find the number of transparent colours colors.groupby("is_trans").count() # There are 28 transparent colours. # ### Understanding LEGO Themes vs. LEGO Sets # Walk into a LEGO store and you will see their products organised by theme. Their themes include Star Wars, Batman, Harry Potter and many more. # A lego set is a particular box of LEGO or product. Therefore, a single theme typically has many different sets. # # The sets.csv data contains a list of sets over the years and the number of parts that each of these sets contained. sets.head() sets.tail() # Let's find in which year were the first LEGO sets released and what were these sets called? min_year = sets["year"].min() print(min_year) first_sets = sets[sets["year"] == min_year] first_sets # The first LEGO sets was released in 1949, with the names of the sets they released listed above. They sold 5 sets in their first year. # Let's Find the top 5 LEGO sets with the most number of parts. sets.sort_values("num_parts", ascending=False).head() # The largest LEGO set ever produced has around 10,000 pieces! Apparently, only two of these boxes were ever produced, so if you wanted to get your hands on a ridiculously large LEGO set, you'll have to settle for the 7,541 piece Millennium Falcon. # ## Sets Per Year # Next, we will use .groupby() and .count() to show the number of LEGO sets released year-on-year. How do the number of sets released in 1955 compare to the number of sets released in 2019? sets_by_year = sets.groupby("year").count() sets_by_year["set_num"] # Lego seems to release more sets the for every year after. 
sets_by_year["set_num"].head() sets_by_year["set_num"].tail() # From this, we can see that LEGO published less than 10 different sets per year during its first few years of operation. But by 2019 the company had grown spectacularly, releasing 840 sets in that year alone! # ## Visualize the Number of Sets Published over Time plt.plot(sets_by_year.index, sets_by_year.set_num) # # Note that the .csv file is from late 2020, so to plot the full calendar years, we will have to exclude some data from the chart. plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2]) # We also see that while the first 45 years or so, LEGO had some steady growth in its product offering, but it was really in the mid-1990s that the number of sets produced by the company increased dramatically! We also see a brief decline in the early 2000s and a strong recovery around 2005 in the chart. # ### Aggregate Data with the Python .agg() Function # Let's work out the number of different themes shipped by year. This means we have to count the number of unique theme_ids per calendar year. theme_by_year = sets.groupby("year").agg({"theme_id": pd.Series.nunique}) theme_by_year.rename(columns={"theme_id": "nr_themes"}, inplace="True") theme_by_year.head() # Here we can see that LEGO only had 2 themes during the first few years, but just like the number of sets the number of themes expanded manifold over the years. Let's plot this on a chart again. plt.plot(theme_by_year.index[:-2], theme_by_year.nr_themes[:-2]) # Again, we can see that LEGO has pretty consistently added more and more themes until the mid-1990s. From then the number of themes has stagnated for around 10 years or so until the early 2010s. # ### Line Charts with Two Seperate Axes ax1 = plt.gca() # get current axes ax2 = ax1.twinx() # create another axis that shares the same x-axis # plot ax1.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2], color="blue") ax2.plot(theme_by_year.index[:-2], theme_by_year.nr_themes[:-2], color="orange") # add styling ax1.set_xlabel("Year") ax1.set_ylabel("Number of Sets", color="blue") ax2.set_ylabel("Number of Themes", color="orange") # Next, let's figure out the average number of parts per set. How many parts did the average LEGO set released in 1954 compared to say, 2017? parts_per_set = sets.groupby("year").agg({"num_parts": pd.Series.mean}) parts_per_set.rename(columns={"num_parts": "avg_num_parts"}, inplace="True") parts_per_set # ### Scatter Plots in Matplotlib # Has the size and complexity of LEGO sets increased over time based on the number of parts? Next, we will plot the average number of parts over time using a Matplotlib scatter plot. plt.scatter(parts_per_set[:-2].index, parts_per_set[:-2]["avg_num_parts"]) # From the chart, we can see an upward trend in the size and complexity of the LEGO sets based on the average number of parts. In the 2010s the average set contained around 200 individual pieces, which is roughly double what average LEGO set used to contain in the 1960s. # ### Number of Sets per LEGO Theme # LEGO has licensed many hit franchises from Harry Potter to Marvel Super Heros to many others. But which theme has the largest number of individual sets? set_theme_count = sets["theme_id"].value_counts() set_theme_count set_theme_count = pd.DataFrame( {"id": set_theme_count.index, "set_count": set_theme_count.values} ) set_theme_count # Hmm not very informative, we don't know what are those nunmbers actually represented. So it's time for use to combine dataframe toghether. 
# This is the database schema # ### Database Schemas, Foreign Keys and Merging DataFrames # The themes.csv file has the actual theme names. The sets .csv has theme_ids which link to the id column in the themes.csv. # We will explore the themes.csv. How is it structured?. and see how many ids correspond to this name in the themes.csv? themes.head() # There aree only 3 columns in the `themes.csv`. that are, `id` of the theme, `name` of the theme, and the `parent_id` of the theme. themes[themes["name"] == "Star Wars"] # Not every name is unique! Theree are 4 ids that correspond to the 'Star Wars' name in the `themes.csv` # ### Merging Themes and Sets # Wouldn't it be nice if we could combine our data on theme names with the number sets per theme? merged_df = pd.merge(set_theme_count, themes, on="id") merged_df # Yayy, let's draw the bar chart now! plt.figure(figsize=(14, 8)) plt.xticks(fontsize=14, rotation=45) plt.yticks(fontsize=14) plt.ylabel("Nr of Sets", fontsize=14) plt.xlabel("Theme Name", fontsize=14) plt.bar(merged_df.name[:10], merged_df.set_count[:10])
local_path: /fsx/loubna/kaggle_data/kaggle-code-data/data/0129/219/129219940.ipynb
kaggle_dataset_name: null
kaggle_dataset_owner: null
kversion:
[{"Id": 129219940, "ScriptId": 38417399, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8032771, "CreationDate": "05/12/2023 00:30:49", "VersionNumber": 1.0, "Title": "Analyse the LEGO dataset", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 207.0, "LinesInsertedFromPrevious": 207.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
kversion_datasetsources: null
dataset_versions: null
datasets: null
users: null
script: identical to the content cell above
has_data_info: false
nb_filenames: 0
script_nb_tokens: 2,420
upvotes: 0
tokens_description: 2,420
tokens_script: 2,420
--- Preview row: file_id 129724345 ---
content (dataset description, sample records, and Kaggle notebook source):
<jupyter_start><jupyter_text>Bank Customers Churn ### Context A dataset which contain some customers who are withdrawing their account from the bank due to some loss and other issues with the help this data we try to analyse and maintain accuracy. ### Content What's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too. Kaggle dataset identifier: bank-customers <jupyter_code>import pandas as pd df = pd.read_csv('bank-customers/Churn Modeling.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 10000 entries, 0 to 9999 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 RowNumber 10000 non-null int64 1 CustomerId 10000 non-null int64 2 Surname 10000 non-null object 3 CreditScore 10000 non-null int64 4 Geography 10000 non-null object 5 Gender 10000 non-null object 6 Age 10000 non-null int64 7 Tenure 10000 non-null int64 8 Balance 10000 non-null float64 9 NumOfProducts 10000 non-null int64 10 HasCrCard 10000 non-null int64 11 IsActiveMember 10000 non-null int64 12 EstimatedSalary 10000 non-null float64 13 Exited 10000 non-null int64 dtypes: float64(2), int64(9), object(3) memory usage: 1.1+ MB <jupyter_text>Examples: { "RowNumber": 1, "CustomerId": 15634602, "Surname": "Hargrave", "CreditScore": 619, "Geography": "France", "Gender": "Female", "Age": 42, "Tenure": 2, "Balance": 0.0, "NumOfProducts": 1, "HasCrCard": 1, "IsActiveMember": 1, "EstimatedSalary": 101348.88, "Exited": 1 } { "RowNumber": 2, "CustomerId": 15647311, "Surname": "Hill", "CreditScore": 608, "Geography": "Spain", "Gender": "Female", "Age": 41, "Tenure": 1, "Balance": 83807.86, "NumOfProducts": 1, "HasCrCard": 0, "IsActiveMember": 1, "EstimatedSalary": 112542.58, "Exited": 0 } { "RowNumber": 3, "CustomerId": 15619304, "Surname": "Onio", "CreditScore": 502, "Geography": "France", "Gender": "Female", "Age": 42, "Tenure": 8, "Balance": 159660.8, "NumOfProducts": 3, "HasCrCard": 1, "IsActiveMember": 0, "EstimatedSalary": 113931.57, "Exited": 1 } { "RowNumber": 4, "CustomerId": 15701354, "Surname": "Boni", "CreditScore": 699, "Geography": "France", "Gender": "Female", "Age": 39, "Tenure": 1, "Balance": 0.0, "NumOfProducts": 2, "HasCrCard": 0, "IsActiveMember": 0, "EstimatedSalary": 93826.63, "Exited": 0 } <jupyter_script>import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import accuracy_score from sklearn import metrics df = pd.read_csv("/kaggle/input/bank-customers/Churn Modeling.csv") # ### Part 1 :Basic Understanding on the Dataset # df.head() df.info() # **As shown above, the dataset's dimension is 14 X 10000 rows # which has no Null values. No data pre-processing is needed** # **** # Countries involved print(df["Geography"].unique()) print(df["Geography"].value_counts()) # df.to_csv("/kaggle/working/bank_customer.csv") # # **Customers in France, Spain and Germany are involved.** # **Number of customers for each countries are listed also.** print(df["Exited"].unique()) print(df["Exited"].value_counts()) # **As shown below, about 20% of Customers have exited. ** # ### Part 2 : EDA and Data Visualization on Dataset # **The first three columns : RowNumber, CustomerId, Surname are not necessary for data analysis / visualization. 
Therefore, these columns will be deleted.** df_new = df.iloc[:, 3:] df_new = pd.get_dummies(data=df_new, columns=["Geography", "Gender"]) # df_normalize = df_new # cols_to_norm = ['CreditScore','Age','Balance','EstimatedSalary'] # df_normalize[cols_to_norm] = df_normalize[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min())) # **Average Account Balance of Different Countries** fig = plt.figure(figsize=(9, 6)) # Creating plot sns.boxplot(x=df["Geography"], y=df["Balance"], data=df) # add title and axis labels plt.title("Boxplot of Account Balance") plt.xlabel("All Countries") plt.ylabel("Average Account Balance Plot") # show plot plt.show() fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(25, 20)) # Set the font size for the plot sns.set(font_scale=2.5) # Creation of the four subplots sns.countplot(x="Geography", hue="Exited", data=df, ax=ax[0][0]) sns.countplot(x="Gender", hue="Exited", data=df, ax=ax[0][1]) sns.countplot(x="HasCrCard", hue="Exited", data=df, ax=ax[1][0]) sns.countplot(x="IsActiveMember", hue="Exited", data=df, ax=ax[1][1]) # Add a big title to the subplot fig.suptitle( "Four Subplots Showing Exited vs Variables", fontsize=30, fontweight="bold", y=1.05 ) # **Distribution of Age (Exited vs Existing Customers)** sns.set(font_scale=1.5) sns.kdeplot(x=df["Age"], hue=df["Exited"]) plt.title("Age Distribution of Customers") plt.xlabel("Age") plt.ylabel("Density") # # **Heatmap Showing correlation between variables** sns.set(font_scale=1) plt.figure(figsize=(18, 18)) corr_matrix = df_new.corr() # Plot the heatmap sns.heatmap(corr_matrix, annot=True, cmap="coolwarm") sns.set(font_scale=1) sns.scatterplot(x="Age", y="NumOfProducts", hue="Exited", data=df_new) # ### Part 3 : Binary Classification by Machine Learning Methods import sklearn as sk from sklearn.preprocessing import StandardScaler X = df_new.drop("Exited", axis=1) y = df_new["Exited"] # Normalize the dataframe scaler = StandardScaler() df_normalized = scaler.fit_transform(X) # Convert the numpy array back to a dataframe df_normalized = pd.DataFrame(df_normalized, columns=X.columns) X_train, X_test, y_train, y_test = train_test_split( df_normalized, y, test_size=0.3, random_state=42 ) print(X_train.shape[0]) print(X_train.shape[1]) # **Function for Printing Accuracy of the Model** def accuracy_print(y_test, y_pred): print(f"Accuracy : {accuracy_score(y_test,y_pred)}") confusion_matrix = metrics.confusion_matrix(y_test, y_pred) sns.reset_orig() cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=confusion_matrix, display_labels=[False, True] ) cm_display.plot() plt.show() # **Logistic Regression** from sklearn.linear_model import LogisticRegression # Train a logistic regression model lr = LogisticRegression() lr.fit(X_train, y_train) # Predict the target variable on the test set y_pred = lr.predict(X_test) accuracy_print(y_test, y_pred) # Merge y_pred and y_test into a single dataframe results = pd.concat([y_test.reset_index(drop=True), pd.Series(y_pred)], axis=1) results.columns = ["y_test", "y_pred"] # Export the results to an Excel file # results.to_excel('/kaggle/working/results.xlsx', index=False) # **SVC** # SVC from sklearn.svm import SVC clf = SVC(kernel="poly") clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy_print(y_test, y_pred) # **Decision Tree** # Decision Tree from sklearn.tree import DecisionTreeClassifier # Create Decision Tree classifier object clf = DecisionTreeClassifier(criterion="gini", max_depth=5) # Train Decision Tree Classifier clf = clf.fit(X_train, y_train) # Predict the response for test dataset y_pred = clf.predict(X_test) accuracy_print(y_test, y_pred)
# **Neural Network** from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout # Define the model model = Sequential() model.add(Dense(units=128, activation="relu", input_shape=(X_train.shape[1],))) model.add(Dropout(0.2)) model.add(Dense(units=64, activation="sigmoid")) model.add(Dropout(0.2)) model.add(Dense(units=32, activation="relu")) model.add(Dropout(0.2)) model.add(Dense(units=16, activation="sigmoid")) model.add(Dropout(0.2)) model.add(Dense(units=1, activation="sigmoid")) # Compile the model with a binary-classification loss (the target Exited is 0/1, so a ReLU output with mean squared error is not appropriate) model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) # Train the model model.fit(X_train, y_train, epochs=50, batch_size=10) # Predict churn probabilities on the testing data and threshold them at 0.5 y_pred_prob = model.predict(X_test) y_pred = (y_pred_prob > 0.5).astype(int).flatten() print(f"Accuracy : {accuracy_score(y_test, y_pred)}") # Merge y_pred and y_test into a single dataframe # results = pd.concat([y_test.reset_index(drop=True), pd.Series(y_pred)], axis=1) # results.columns = ['y_test', 'y_pred'] # Export the results to an Excel file # results.to_excel('/kaggle/working/results.xlsx', index=False) # Create a DataFrame from the predicted and actual results results_df = pd.DataFrame({"y_test": y_test, "y_pred": y_pred}) # Save the DataFrame to an Excel file results_df.to_excel("results.xlsx", index=False)
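Since only about 20% of the customers in this dataset have exited, plain accuracy can look reasonable even for a model that rarely predicts churn. The snippet below is a minimal sketch (not part of the original kernel) of an imbalance-aware evaluation; it assumes X_test, y_test and a fitted classifier clf from the cells above are still in scope.

# Minimal sketch: imbalance-aware evaluation for the churn classifiers above
from sklearn.metrics import classification_report, roc_auc_score

y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred, target_names=["Retained", "Exited"]))

# ROC AUC needs scores rather than hard labels; fall back to decision_function
# for models (such as SVC without probability=True) that lack predict_proba
if hasattr(clf, "predict_proba"):
    scores = clf.predict_proba(X_test)[:, 1]
else:
    scores = clf.decision_function(X_test)
print(f"ROC AUC : {roc_auc_score(y_test, scores):.3f}")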
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/724/129724345.ipynb
bank-customers
santoshd3
[{"Id": 129724345, "ScriptId": 38102220, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1017090, "CreationDate": "05/16/2023 03:33:02", "VersionNumber": 1.0, "Title": "Bank Customers Churn EDA and Machine Learning", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 229.0, "LinesInsertedFromPrevious": 229.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186065249, "KernelVersionId": 129724345, "SourceDatasetVersionId": 51854}]
[{"Id": 51854, "DatasetId": 35847, "DatasourceVersionId": 54245, "CreatorUserId": 1792385, "LicenseName": "Other (specified in description)", "CreationDate": "07/09/2018 09:06:39", "VersionNumber": 1.0, "Title": "Bank Customers Churn", "Slug": "bank-customers", "Subtitle": "Artificial Neural Network Model using Keras and Tensorflow with 85% Acuuracy", "Description": "### Context\n\nA dataset which contain some customers who are withdrawing their account from the bank due to some loss and other issues with the help this data we try to analyse and maintain accuracy.\n\n\n### Content\n\nWhat's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too.\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 684858.0, "TotalUncompressedBytes": 684858.0}]
[{"Id": 35847, "CreatorUserId": 1792385, "OwnerUserId": 1792385.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 51854.0, "CurrentDatasourceVersionId": 54245.0, "ForumId": 44254, "Type": 2, "CreationDate": "07/09/2018 09:06:39", "LastActivityDate": "07/09/2018", "TotalViews": 80241, "TotalDownloads": 10036, "TotalVotes": 128, "TotalKernels": 49}]
[{"Id": 1792385, "UserName": "santoshd3", "DisplayName": "Santosh kumar", "RegisterDate": "04/05/2018", "PerformanceTier": 2}]
[{"bank-customers/Churn Modeling.csv": {"column_names": "[\"RowNumber\", \"CustomerId\", \"Surname\", \"CreditScore\", \"Geography\", \"Gender\", \"Age\", \"Tenure\", \"Balance\", \"NumOfProducts\", \"HasCrCard\", \"IsActiveMember\", \"EstimatedSalary\", \"Exited\"]", "column_data_types": "{\"RowNumber\": \"int64\", \"CustomerId\": \"int64\", \"Surname\": \"object\", \"CreditScore\": \"int64\", \"Geography\": \"object\", \"Gender\": \"object\", \"Age\": \"int64\", \"Tenure\": \"int64\", \"Balance\": \"float64\", \"NumOfProducts\": \"int64\", \"HasCrCard\": \"int64\", \"IsActiveMember\": \"int64\", \"EstimatedSalary\": \"float64\", \"Exited\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10000 entries, 0 to 9999\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 RowNumber 10000 non-null int64 \n 1 CustomerId 10000 non-null int64 \n 2 Surname 10000 non-null object \n 3 CreditScore 10000 non-null int64 \n 4 Geography 10000 non-null object \n 5 Gender 10000 non-null object \n 6 Age 10000 non-null int64 \n 7 Tenure 10000 non-null int64 \n 8 Balance 10000 non-null float64\n 9 NumOfProducts 10000 non-null int64 \n 10 HasCrCard 10000 non-null int64 \n 11 IsActiveMember 10000 non-null int64 \n 12 EstimatedSalary 10000 non-null float64\n 13 Exited 10000 non-null int64 \ndtypes: float64(2), int64(9), object(3)\nmemory usage: 1.1+ MB\n", "summary": "{\"RowNumber\": {\"count\": 10000.0, \"mean\": 5000.5, \"std\": 2886.8956799071675, \"min\": 1.0, \"25%\": 2500.75, \"50%\": 5000.5, \"75%\": 7500.25, \"max\": 10000.0}, \"CustomerId\": {\"count\": 10000.0, \"mean\": 15690940.5694, \"std\": 71936.1861227489, \"min\": 15565701.0, \"25%\": 15628528.25, \"50%\": 15690738.0, \"75%\": 15753233.75, \"max\": 15815690.0}, \"CreditScore\": {\"count\": 10000.0, \"mean\": 650.5288, \"std\": 96.65329873613035, \"min\": 350.0, \"25%\": 584.0, \"50%\": 652.0, \"75%\": 718.0, \"max\": 850.0}, \"Age\": {\"count\": 10000.0, \"mean\": 38.9218, \"std\": 10.487806451704609, \"min\": 18.0, \"25%\": 32.0, \"50%\": 37.0, \"75%\": 44.0, \"max\": 92.0}, \"Tenure\": {\"count\": 10000.0, \"mean\": 5.0128, \"std\": 2.8921743770496837, \"min\": 0.0, \"25%\": 3.0, \"50%\": 5.0, \"75%\": 7.0, \"max\": 10.0}, \"Balance\": {\"count\": 10000.0, \"mean\": 76485.889288, \"std\": 62397.405202385955, \"min\": 0.0, \"25%\": 0.0, \"50%\": 97198.54000000001, \"75%\": 127644.24, \"max\": 250898.09}, \"NumOfProducts\": {\"count\": 10000.0, \"mean\": 1.5302, \"std\": 0.5816543579989906, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 4.0}, \"HasCrCard\": {\"count\": 10000.0, \"mean\": 0.7055, \"std\": 0.4558404644751333, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"IsActiveMember\": {\"count\": 10000.0, \"mean\": 0.5151, \"std\": 0.49979692845891893, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"EstimatedSalary\": {\"count\": 10000.0, \"mean\": 100090.239881, \"std\": 57510.49281769816, \"min\": 11.58, \"25%\": 51002.11, \"50%\": 100193.915, \"75%\": 149388.2475, \"max\": 199992.48}, \"Exited\": {\"count\": 10000.0, \"mean\": 0.2037, \"std\": 0.4027685839948609, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}}", "examples": 
"{\"RowNumber\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"CustomerId\":{\"0\":15634602,\"1\":15647311,\"2\":15619304,\"3\":15701354},\"Surname\":{\"0\":\"Hargrave\",\"1\":\"Hill\",\"2\":\"Onio\",\"3\":\"Boni\"},\"CreditScore\":{\"0\":619,\"1\":608,\"2\":502,\"3\":699},\"Geography\":{\"0\":\"France\",\"1\":\"Spain\",\"2\":\"France\",\"3\":\"France\"},\"Gender\":{\"0\":\"Female\",\"1\":\"Female\",\"2\":\"Female\",\"3\":\"Female\"},\"Age\":{\"0\":42,\"1\":41,\"2\":42,\"3\":39},\"Tenure\":{\"0\":2,\"1\":1,\"2\":8,\"3\":1},\"Balance\":{\"0\":0.0,\"1\":83807.86,\"2\":159660.8,\"3\":0.0},\"NumOfProducts\":{\"0\":1,\"1\":1,\"2\":3,\"3\":2},\"HasCrCard\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0},\"IsActiveMember\":{\"0\":1,\"1\":1,\"2\":0,\"3\":0},\"EstimatedSalary\":{\"0\":101348.88,\"1\":112542.58,\"2\":113931.57,\"3\":93826.63},\"Exited\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0}}"}}]
true
1
<start_data_description><data_path>bank-customers/Churn Modeling.csv: <column_names> ['RowNumber', 'CustomerId', 'Surname', 'CreditScore', 'Geography', 'Gender', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard', 'IsActiveMember', 'EstimatedSalary', 'Exited'] <column_types> {'RowNumber': 'int64', 'CustomerId': 'int64', 'Surname': 'object', 'CreditScore': 'int64', 'Geography': 'object', 'Gender': 'object', 'Age': 'int64', 'Tenure': 'int64', 'Balance': 'float64', 'NumOfProducts': 'int64', 'HasCrCard': 'int64', 'IsActiveMember': 'int64', 'EstimatedSalary': 'float64', 'Exited': 'int64'} <dataframe_Summary> {'RowNumber': {'count': 10000.0, 'mean': 5000.5, 'std': 2886.8956799071675, 'min': 1.0, '25%': 2500.75, '50%': 5000.5, '75%': 7500.25, 'max': 10000.0}, 'CustomerId': {'count': 10000.0, 'mean': 15690940.5694, 'std': 71936.1861227489, 'min': 15565701.0, '25%': 15628528.25, '50%': 15690738.0, '75%': 15753233.75, 'max': 15815690.0}, 'CreditScore': {'count': 10000.0, 'mean': 650.5288, 'std': 96.65329873613035, 'min': 350.0, '25%': 584.0, '50%': 652.0, '75%': 718.0, 'max': 850.0}, 'Age': {'count': 10000.0, 'mean': 38.9218, 'std': 10.487806451704609, 'min': 18.0, '25%': 32.0, '50%': 37.0, '75%': 44.0, 'max': 92.0}, 'Tenure': {'count': 10000.0, 'mean': 5.0128, 'std': 2.8921743770496837, 'min': 0.0, '25%': 3.0, '50%': 5.0, '75%': 7.0, 'max': 10.0}, 'Balance': {'count': 10000.0, 'mean': 76485.889288, 'std': 62397.405202385955, 'min': 0.0, '25%': 0.0, '50%': 97198.54000000001, '75%': 127644.24, 'max': 250898.09}, 'NumOfProducts': {'count': 10000.0, 'mean': 1.5302, 'std': 0.5816543579989906, 'min': 1.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 4.0}, 'HasCrCard': {'count': 10000.0, 'mean': 0.7055, 'std': 0.4558404644751333, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'IsActiveMember': {'count': 10000.0, 'mean': 0.5151, 'std': 0.49979692845891893, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'EstimatedSalary': {'count': 10000.0, 'mean': 100090.239881, 'std': 57510.49281769816, 'min': 11.58, '25%': 51002.11, '50%': 100193.915, '75%': 149388.2475, 'max': 199992.48}, 'Exited': {'count': 10000.0, 'mean': 0.2037, 'std': 0.4027685839948609, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}} <dataframe_info> RangeIndex: 10000 entries, 0 to 9999 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 RowNumber 10000 non-null int64 1 CustomerId 10000 non-null int64 2 Surname 10000 non-null object 3 CreditScore 10000 non-null int64 4 Geography 10000 non-null object 5 Gender 10000 non-null object 6 Age 10000 non-null int64 7 Tenure 10000 non-null int64 8 Balance 10000 non-null float64 9 NumOfProducts 10000 non-null int64 10 HasCrCard 10000 non-null int64 11 IsActiveMember 10000 non-null int64 12 EstimatedSalary 10000 non-null float64 13 Exited 10000 non-null int64 dtypes: float64(2), int64(9), object(3) memory usage: 1.1+ MB <some_examples> {'RowNumber': {'0': 1, '1': 2, '2': 3, '3': 4}, 'CustomerId': {'0': 15634602, '1': 15647311, '2': 15619304, '3': 15701354}, 'Surname': {'0': 'Hargrave', '1': 'Hill', '2': 'Onio', '3': 'Boni'}, 'CreditScore': {'0': 619, '1': 608, '2': 502, '3': 699}, 'Geography': {'0': 'France', '1': 'Spain', '2': 'France', '3': 'France'}, 'Gender': {'0': 'Female', '1': 'Female', '2': 'Female', '3': 'Female'}, 'Age': {'0': 42, '1': 41, '2': 42, '3': 39}, 'Tenure': {'0': 2, '1': 1, '2': 8, '3': 1}, 'Balance': {'0': 0.0, '1': 83807.86, '2': 159660.8, '3': 0.0}, 'NumOfProducts': {'0': 1, '1': 1, '2': 3, '3': 
2}, 'HasCrCard': {'0': 1, '1': 0, '2': 1, '3': 0}, 'IsActiveMember': {'0': 1, '1': 1, '2': 0, '3': 0}, 'EstimatedSalary': {'0': 101348.88, '1': 112542.58, '2': 113931.57, '3': 93826.63}, 'Exited': {'0': 1, '1': 0, '2': 1, '3': 0}} <end_description>
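The description above spells out the CSV's path, column names, and dtypes. The following is a minimal sketch (not part of the original kernel) of reusing that documented schema when re-reading the file and then computing the per-country churn rate that the countplots visualize; the dtype JSON is truncated here for brevity and the path is the one used by the kernel.

import json
import pandas as pd

# Truncated copy of the column types documented above
column_types_json = (
    '{"CreditScore": "int64", "Geography": "object", "Gender": "object", '
    '"Age": "int64", "Balance": "float64", "Exited": "int64"}'
)
dtype_map = json.loads(column_types_json)

# Read only the described columns, with explicit dtypes
df = pd.read_csv(
    "/kaggle/input/bank-customers/Churn Modeling.csv",
    usecols=list(dtype_map),
    dtype=dtype_map,
)

# Exited is 0/1, so the mean per group is the churn rate
churn_by_geo = df.groupby("Geography")["Exited"].agg(customers="count", churn_rate="mean")
print(churn_by_geo.sort_values("churn_rate", ascending=False))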
2,008
1
3,048
2,008
129724062
<jupyter_start><jupyter_text>sr_testing_data Kaggle dataset identifier: sr-testing-data <jupyter_script># #upgrade pytorch to 1.12 # !pip install /kaggle/input/pytorch112-cu113/{torch-1.12.1+cu113-cp37-cp37m-linux_x86_64.whl,torchvision-0.13.1+cu113-cp37-cp37m-linux_x86_64.whl} # #install nvidia-pyindex # !pip install /kaggle/input/torch-tensorrt-pkg/nvidia_pyindex-1.0.9-py3-none-any.whl # #install nvidia_tensorrt # !mkdir -p /tmp/pip/cache/ # !cp /kaggle/input/torch-tensorrt-pkg/nvidia-cublas-cu11-2022.4.8.xyz /tmp/pip/cache/nvidia-cublas-cu11-2022.4.8.tar.gz # !cp /kaggle/input/torch-tensorrt-pkg/nvidia-cuda-runtime-cu11-2022.4.25.xyz /tmp/pip/cache/nvidia-cuda-runtime-cu11-2022.4.25.tar.gz # !cp /kaggle/input/torch-tensorrt-pkg/nvidia-cudnn-cu11-2022.5.19.xyz /tmp/pip/cache/nvidia-cudnn-cu11-2022.5.19.tar.gz # !cp /kaggle/input/torch-tensorrt-pkg/nvidia_cublas_cu117-11.10.1.25-py3-none-manylinux1_x86_64.whl /tmp/pip/cache/ # !cp /kaggle/input/torch-tensorrt-pkg/nvidia_cuda_runtime_cu117-11.7.60-py3-none-manylinux1_x86_64.whl /tmp/pip/cache/ # !cp /kaggle/input/torch-tensorrt-pkg/nvidia_cudnn_cu116-8.4.0.27-py3-none-manylinux1_x86_64.whl /tmp/pip/cache/ # !cp /kaggle/input/torch-tensorrt-pkg/nvidia_tensorrt-8.4.3.1-cp37-none-linux_x86_64.whl /tmp/pip/cache/ # !pip install --no-index --find-links /tmp/pip/cache/ nvidia_tensorrt # #install torch_tensorrt # !pip install /kaggle/input/torch-tensorrt-pkg/torch_tensorrt-1.2.0-cp37-cp37m-linux_x86_64.whl # #install pytorch_quantization # !pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com # !pip install nvidia-pyindex # !pip install torch-tensorrt==1.3.0 -f https://github.com/pytorch/tensorrt/releases import os from PIL import Image from torchvision import transforms from torch.utils.data import Dataset import torch from torch import nn from torchvision.models import vgg19 import math import torch.nn.functional as F from torch.utils.data import DataLoader from tqdm import tqdm import numpy as np import torch.optim.lr_scheduler as lr_scheduler import copy from torch.autograd import Function, Variable from collections import namedtuple, OrderedDict from torchvision.transforms.functional import to_tensor import time import gc import psutil import cv2 import imageio # import tensorrt # import torch_tensorrt # ## Ult def gaussian(window_size, sigma): gauss = torch.Tensor( [ math.exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) for x in range(window_size) ] ) return gauss / gauss.sum() def create_window(window_size, channel): _1D_window = gaussian(window_size, 1.5).unsqueeze(1) _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) window = _2D_window.expand(channel, 1, window_size, window_size).contiguous() return window def calc_psnr(sr, hr, scale, rgb_range): """calculate psnr""" hr = np.float32(hr) sr = np.float32(sr) diff = (sr - hr) / rgb_range gray_coeffs = np.array([65.738, 129.057, 25.064]).reshape((1, 3, 1, 1)) / 256 diff = np.multiply(diff, gray_coeffs).sum(1) if hr.size == 1: return 0 if scale != 1: shave = scale else: shave = scale + 6 if scale == 1: valid = diff else: valid = diff[..., shave:-shave, shave:-shave] mse = np.mean(pow(valid, 2)) return -10 * math.log10(mse) def calc_ssim(img1, img2, scale): """calculate ssim value""" def ssim(img1, img2): C1 = (0.01 * 255) ** 2 C2 = (0.03 * 255) ** 2 img1 = img1.astype(np.float64) img2 = img2.astype(np.float64) kernel = cv2.getGaussianKernel(11, 1.5) window = np.outer(kernel, kernel.transpose()) mu1 = cv2.filter2D(img1, -1, 
window)[5:-5, 5:-5] # valid mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] mu1_sq = mu1**2 mu2_sq = mu2**2 mu1_mu2 = mu1 * mu2 sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ( (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2) ) return ssim_map.mean() border = 0 if scale != 1: border = scale else: border = scale + 6 img1_y = np.dot(img1, [65.738, 129.057, 25.064]) / 256.0 + 16.0 img2_y = np.dot(img2, [65.738, 129.057, 25.064]) / 256.0 + 16.0 if not img1.shape == img2.shape: raise ValueError("Input images must have the same dimensions.") h, w = img1.shape[:2] img1_y = img1_y[border : h - border, border : w - border] img2_y = img2_y[border : h - border, border : w - border] if img1_y.ndim == 2: return ssim(img1_y, img2_y) if img1.ndim == 3: if img1.shape[2] == 3: ssims = [] for _ in range(3): ssims.append(ssim(img1, img2)) return np.array(ssims).mean() if img1.shape[2] == 1: return ssim(np.squeeze(img1), np.squeeze(img2)) else: raise ValueError("Wrong input image dimensions.") def tensors_to_imgs(x): for i in range(len(x)): x[i] = x[i].squeeze(0).data.cpu().numpy() x[i] = x[i].clip(0, 255) # .round() x[i] = x[i].transpose(1, 2, 0).astype(np.uint8) return x def imgs_to_tensors(x): for i in range(len(x)): x[i] = x[i].transpose(2, 0, 1) x[i] = np.expand_dims(x[i], axis=0) x[i] = torch.Tensor(x[i].astype(float)) return x def rgb2y(rgb): return np.dot(rgb[..., :3], [65.738 / 256, 129.057 / 256, 25.064 / 256]) + 16 # def calc_psnr(sr, hr, scale, rgb_range=255, benchmark=False): # diff = (sr - hr).data.div(rgb_range) # shave = scale # if diff.size(1) > 1: # convert = diff.new(1, 3, 1, 1) # convert[0, 0, 0, 0] = 65.738 # convert[0, 1, 0, 0] = 129.057 # convert[0, 2, 0, 0] = 25.064 # diff.mul_(convert).div_(256) # diff = diff.sum(dim=1, keepdim=True) # ''' # if benchmark: # shave = scale # if diff.size(1) > 1: # convert = diff.new(1, 3, 1, 1) # convert[0, 0, 0, 0] = 65.738 # convert[0, 1, 0, 0] = 129.057 # convert[0, 2, 0, 0] = 25.064 # diff.mul_(convert).div_(256) # diff = diff.sum(dim=1, keepdim=True) # else: # shave = scale + 6 # ''' # valid = diff[:, :, shave:-shave, shave:-shave] # mse = valid.pow(2).mean() # return -10 * math.log10(mse) def np2Tensor(l, rgb_range): def _np2Tensor(img): np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1))) tensor = torch.from_numpy(np_transpose).float() tensor.mul_(rgb_range / 255) return tensor return [_np2Tensor(_l) for _l in l] def Tensor2img(v): normalized = v[0].data.mul(255 / 255) ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy() return ndarr def quantize(img, rgb_range): pixel_range = 255 / rgb_range return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range) # # Models class Discriminator(nn.Module): def __init__( self, in_channels: int = 3, features: tuple = (64, 64, 128, 128, 256, 256, 512, 512), ) -> None: super(Discriminator, self).__init__() blocks = [] for idx, feature in enumerate(features): blocks.append( ConvBlock( in_channels, feature, kernel_size=3, stride=1 + idx % 2, padding=1, discriminator=True, use_act=True, use_bn=False if idx == 0 else True, ) ) in_channels = feature self.blocks = nn.Sequential(*blocks) self.classifier = nn.Sequential( nn.AdaptiveAvgPool2d((6, 6)), nn.Flatten(), nn.Linear(512 * 6 * 6, 1024), nn.LeakyReLU(0.2, inplace=True), nn.Linear(1024, 1), ) def forward(self, x): x = self.blocks(x) 
return torch.sigmoid(self.classifier(x)) class VGG(nn.Module): def __init__(self): super(VGG, self).__init__() vgg_features = vgg19(pretrained=True).features modules = [m for m in vgg_features] self.vgg = nn.Sequential(*modules[:35]) # VGG 5_4 rgb_range = 255 vgg_mean = (0.485, 0.456, 0.406) vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range) self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std) self.vgg.requires_grad = False def forward(self, sr, hr): def _forward(x): x = self.sub_mean(x) x = self.vgg(x) return x vgg_sr = _forward(sr) with torch.no_grad(): vgg_hr = _forward(hr.detach()) return vgg_sr, vgg_hr def TVLoss(y): loss_var = torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) + torch.sum( torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :]) ) return loss_var import torch from torch import nn def conv_same_padding( in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True, padding_mode="zeros", ): # Calculate the padding size needed to maintain the same height and width padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode, ) class SeperableConv2d(nn.Sequential): def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, padding_mode="zeros", ): super(SeperableConv2d, self).__init__( # nn.Conv2d( # in_channels=in_channels, out_channels=in_channels, groups=in_channels, # kernel_size=kernel_size, padding='same', dilation=dilation, # bias=bias, padding_mode=padding_mode # ), conv_same_padding( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode, ), nn.Conv2d(in_channels, out_channels, kernel_size=1), ) class UpsampleBlock(nn.Sequential): def __init__(self, in_channels, scale_factor): if scale_factor == 2: super(UpsampleBlock, self).__init__( SeperableConv2d(in_channels, in_channels * 2**2, kernel_size=3), nn.PixelShuffle(2), nn.PReLU(num_parameters=in_channels), ) else: super(UpsampleBlock, self).__init__( SeperableConv2d(in_channels, in_channels * 2**2, kernel_size=3), nn.PixelShuffle(2), nn.PReLU(num_parameters=in_channels), SeperableConv2d(in_channels, in_channels * 2**2, kernel_size=3), nn.PixelShuffle(2), nn.PReLU(num_parameters=in_channels), ) class ConvBlock(nn.Sequential): def __init__( self, in_channels, out_channels, use_bn=False, use_ffc=False, use_act=True, discriminator=False, **kwargs, ): if use_ffc: conv = FFC( in_channels, out_channels, kernel_size=3, ratio_gin=0.5, ratio_gout=0.5, inline=True, ) else: conv = SeperableConv2d(in_channels, out_channels, **kwargs) m = [conv] if use_bn: m.append(nn.BatchNorm2d(out_channels)) if use_act: m.append( nn.LeakyReLU(0.2, inplace=True) if discriminator else nn.PReLU(num_parameters=out_channels) ) super(ConvBlock, self).__init__(*m) class ResidualBlock(nn.Module): def __init__(self, in_channels, index): super(ResidualBlock, self).__init__() self.block1 = ConvBlock( in_channels, in_channels, kernel_size=3, stride=1, padding=1, use_ffc=True if index % 2 == 0 else False, ) self.block2 = ConvBlock( in_channels, in_channels, kernel_size=3, stride=1, padding=1, use_act=False ) # self.skip_ops = nn.quantized.FloatFunctional() def forward(self, x): out = self.block1(x) out = self.block2(out) # out = out.mul(0.1) # out += x # out = self.skip_ops.mul_scalar(out, 0.1) # out = out / 10 out = torch.div(out, 
torch.tensor([10.0], dtype=x.dtype).to(x.device)) out = out + x return out class MeanShift(nn.Conv2d): def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1): super(MeanShift, self).__init__(3, 3, kernel_size=1) std = torch.Tensor(rgb_std) self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1) self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std for p in self.parameters(): p.requires_grad = False class Generator(nn.Module): def __init__( self, chop=False, n_GPUs=1, in_channels: int = 3, num_channels: int = 64, num_blocks: int = 16, upscale_factor: int = 4, ): super(Generator, self).__init__() self.initial = ConvBlock( in_channels, num_channels, kernel_size=3, use_act=False ) self.residual = nn.Sequential( *[ResidualBlock(num_channels, index) for index in range(num_blocks)] ) self.upsampler = UpsampleBlock(num_channels, scale_factor=upscale_factor) self.final_conv = SeperableConv2d(num_channels, in_channels, kernel_size=3) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) rgb_range = 255.0 self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std) self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1) self.chop = chop self.n_GPUs = n_GPUs self.upscale_factor = upscale_factor def forward(self, x): if self.chop: return self.forward_chop(x) else: return self.model_forward(x) def model_forward(self, x): x = self.sub_mean(x) initial = self.initial(x) x = self.residual(initial) + initial x = self.upsampler(x) out = self.final_conv(x) out = self.add_mean(out) return out def forward_chop(self, x, shave=10, min_size=160000): scale = self.upscale_factor n_GPUs = min(self.n_GPUs, 4) b, c, h, w = x.size() h_half, w_half = h // 2, w // 2 h_size, w_size = h_half + shave, w_half + shave lr_list = [ x[:, :, 0:h_size, 0:w_size], x[:, :, 0:h_size, (w - w_size) : w], x[:, :, (h - h_size) : h, 0:w_size], x[:, :, (h - h_size) : h, (w - w_size) : w], ] if w_size * h_size < min_size: sr_list = [] for i in range(0, 4, n_GPUs): lr_batch = torch.cat(lr_list[i : (i + n_GPUs)], dim=0) sr_batch = self.model_forward(lr_batch) sr_list.extend(sr_batch.chunk(n_GPUs, dim=0)) else: sr_list = [ self.forward_chop(patch, shave=shave, min_size=min_size) for patch in lr_list ] h, w = scale * h, scale * w h_half, w_half = scale * h_half, scale * w_half h_size, w_size = scale * h_size, scale * w_size shave *= scale output = x.new(b, c, h, w) output[:, :, 0:h_half, 0:w_half] = sr_list[0][:, :, 0:h_half, 0:w_half] output[:, :, 0:h_half, w_half:w] = sr_list[1][ :, :, 0:h_half, (w_size - w + w_half) : w_size ] output[:, :, h_half:h, 0:w_half] = sr_list[2][ :, :, (h_size - h + h_half) : h_size, 0:w_half ] output[:, :, h_half:h, w_half:w] = sr_list[3][ :, :, (h_size - h + h_half) : h_size, (w_size - w + w_half) : w_size ] return output def ComplexConv(x, weight): real = F.conv2d( x.real, weight.real, None, stride=1, padding=0, dilation=1, groups=1 ) - F.conv2d(x.imag, weight.imag, None, stride=1, padding=0, dilation=1, groups=1) imag = F.conv2d( x.real, weight.imag, None, stride=1, padding=0, dilation=1, groups=1 ) + F.conv2d(x.imag, weight.real, None, stride=1, padding=0, dilation=1, groups=1) x = torch.complex(real, imag) return x class FourierUnit(nn.Module): def __init__(self, in_channels, out_channels, ffc3d=False, fft_norm="ortho"): super(FourierUnit, self).__init__() self.complex_weight = nn.Parameter( torch.randn(out_channels, in_channels, 1, 1, 2, dtype=torch.float32) * 0.02 ) self.ffc3d = ffc3d self.fft_norm = fft_norm def forward(self, x): B, C, H, W = x.shape # fft_dim = (-3, -2, 
-1) if self.ffc3d else (-2, -1) # Since ffc3d = false --> fft_dim = (-2, -1) fft_dim = (-2, -1) # try: # y = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) # except: # y = rfftn_val(x, dim=fft_dim, norm=self.fft_norm) # if ((y != y).any()): # raise Exception("Have nan in rfftn_val") y = torch.fft.rfftn(x.type(torch.float32), dim=fft_dim, norm=self.fft_norm) # FFT Shift y = torch.fft.fftshift(y) weight = torch.view_as_complex(self.complex_weight) y = ComplexConv(y, weight.type(torch.complex64)) # FFT IShift y = torch.fft.ifftshift(y) ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] # try: # y = torch.fft.irfftn(y, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) # except: # y = irfftn_val(y, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) # if ((y != y).any()): # raise Exception("Have nan in rfftn_val") y = torch.fft.irfftn(y, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) return y.type(x.dtype) class SpectralTransform(nn.Module): def __init__( self, in_channels, out_channels, stride=1, enable_lfu=True, **fu_kwargs ): super(SpectralTransform, self).__init__() self.enable_lfu = enable_lfu if stride == 2: self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) else: self.downsample = nn.Identity() self.stride = stride self.conv1 = nn.Sequential( SeperableConv2d(in_channels, out_channels // 2, kernel_size=1, bias=False), nn.ReLU(inplace=True), ) self.fu = FourierUnit(out_channels // 2, out_channels // 2, **fu_kwargs) if self.enable_lfu: self.lfu = FourierUnit(out_channels // 2, out_channels // 2) self.conv2 = SeperableConv2d( out_channels // 2, out_channels, kernel_size=1, bias=False ) # self.skip_ops = nn.quantized.FloatFunctional() def forward(self, x): x = self.downsample(x) x = self.conv1(x) output = self.fu(x) # if self.enable_lfu: # n, c, h, w = x.shape # split_no = 2 # split_h = h // split_no # split_w = w // split_no # xs = torch.cat(torch.split(x[:, :c // 4], split_h, dim=-2)[0:2], dim=1).contiguous() # xs = torch.cat(torch.split(xs, split_w, dim=-1)[0:2], dim=1).contiguous() # xs = self.lfu(xs) # xs = xs.repeat(1, 1, split_no, split_no).contiguous() # device_run = torch.device("cuda" if xs.is_cuda else "cpu") # if h % 2 == 1: # h_zeros = torch.zeros(xs.shape[0], xs.shape[1], 1, xs.shape[3]).to(device_run) # xs = torch.cat((xs, h_zeros), dim=2) # if w % 2 == 1: # w_zeros = torch.zeros(xs.shape[0], xs.shape[1], xs.shape[2], 1).to(device_run) # xs = torch.cat((xs, w_zeros), dim=3) # else: # xs = 0 # output = self.conv2(x + output + xs) if self.enable_lfu: n, c, h, w = x.shape split_no = 2 split_h = h // split_no split_w = w // split_no xs = torch.cat( torch.split(x[:, : c // 4], split_h, dim=-2)[0:2], dim=1 ).contiguous() xs = torch.cat(torch.split(xs, split_w, dim=-1)[0:2], dim=1).contiguous() xs = self.lfu(xs) xs = xs.repeat(1, 1, split_no, split_no).contiguous() device_run = torch.device("cuda" if xs.is_cuda else "cpu") if h % 2 == 1: h_zeros = torch.zeros( xs.shape[0], xs.shape[1], 1, xs.shape[3], dtype=x.dtype ).to(device_run) xs = torch.cat((xs, h_zeros), dim=2) if w % 2 == 1: w_zeros = torch.zeros( xs.shape[0], xs.shape[1], xs.shape[2], 1, dtype=x.dtype ).to(device_run) xs = torch.cat((xs, w_zeros), dim=3) output = self.conv2(x + output + xs) else: output = self.conv2(x + output) return output class FFC(nn.Module): def __init__( self, in_channels, out_channels, kernel_size, ratio_gin, ratio_gout, inline=True, stride=1, padding=0, dilation=1, enable_lfu=True, padding_type="reflect", gated=False, **spectral_kwargs, ): super(FFC, 
self).__init__() assert stride == 1 or stride == 2, "Stride should be 1 or 2." self.stride = stride self.inline = inline in_cg = int(in_channels * ratio_gin) in_cl = in_channels - in_cg out_cg = int(out_channels * ratio_gout) out_cl = out_channels - out_cg self.ratio_gin = ratio_gin self.ratio_gout = ratio_gout self.global_in_num = in_cg module = nn.Identity if in_cl == 0 or out_cl == 0 else SeperableConv2d self.convl2l = module( in_cl, out_cl, kernel_size, stride, padding, dilation, padding_mode=padding_type, ) module = nn.Identity if in_cl == 0 or out_cg == 0 else SeperableConv2d self.convl2g = module( in_cl, out_cg, kernel_size, stride, padding, dilation, padding_mode=padding_type, ) module = nn.Identity if in_cg == 0 or out_cl == 0 else SeperableConv2d self.convg2l = module( in_cg, out_cl, kernel_size, stride, padding, dilation, padding_mode=padding_type, ) module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform self.convg2g = module(in_cg, out_cg, stride, enable_lfu, **spectral_kwargs) self.gated = gated module = ( nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else SeperableConv2d ) self.gate = module(in_channels, 2, 1) def forward(self, x): # if self.inline: # x_l, x_g = x[:, :-self.global_in_num], x[:, -self.global_in_num:] # else: # x_l, x_g = x if type(x) is tuple else (x, 0) # Since self.inline == True for all case --> x_l, x_g = x[:, :-self.global_in_num], x[:, -self.global_in_num:] x_l, x_g = x[:, : -self.global_in_num], x[:, -self.global_in_num :] # out_xl, out_xg = 0, 0 # if self.gated: # total_input_parts = [x_l] # if torch.is_tensor(x_g): # total_input_parts.append(x_g) # total_input_parts.append(x_g) # total_input = torch.cat(total_input_parts, dim=1) # gates = torch.sigmoid(self.gate(total_input)) # g2l_gate, l2g_gate = gates.chunk(2, dim=1) # else: # g2l_gate, l2g_gate = 1, 1 # Since self.gated == False for all case --> g2l_gate, l2g_gate = 1, 1 g2l_gate, l2g_gate = 1.0, 1.0 # if self.ratio_gout != 1: # out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate # if self.ratio_gout != 0: # out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) # Since self.ratio_gout != 1 and self.ratio_gout != 0 for all case --> Doing 2 lines code # Since g2l_gate = l2g_gate = 1 --> We doing 2 expressions out_xl = self.convl2l(x_l) + self.convg2l(x_g) out_xg = self.convl2g(x_l) + self.convg2g(x_g) # print(f"self.convl2g(x_l).shape = {self.convl2g(x_l).shape} and self.convg2g.shape = {self.convg2g(x_g).shape}") # if self.inline: # out = torch.cat([out_xl, out_xg], dim=1) # Since self.inline == True for all case --> doing action out = torch.cat([out_xl, out_xg], dim=1) return out class Discriminator(nn.Module): def __init__( self, in_channels: int = 3, features: tuple = (64, 64, 128, 128, 256, 256, 512, 512), ) -> None: super(Discriminator, self).__init__() blocks = [] for idx, feature in enumerate(features): blocks.append( ConvBlock( in_channels, feature, kernel_size=3, stride=1 + idx % 2, padding=1, discriminator=True, use_act=True, use_bn=False if idx == 0 else True, ) ) in_channels = feature self.blocks = nn.Sequential(*blocks) self.classifier = nn.Sequential( nn.AdaptiveAvgPool2d((6, 6)), nn.Flatten(), nn.Linear(512 * 6 * 6, 1024), nn.LeakyReLU(0.2, inplace=True), nn.Linear(1024, 1), ) def forward(self, x): x = self.blocks(x) return torch.sigmoid(self.classifier(x)) class GeneratorLoss(nn.Module): def __init__(self, mode): super(GeneratorLoss, self).__init__() self.loss_network = VGG() self.mse_loss = nn.MSELoss() self.mae_loss = nn.L1Loss() if mode == 
"pre": self.img_to, self.adv_to, self.per_to, self.tv_to = 1, 0.001, 0.006, 2e-8 elif mode == "per": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0, 1, 0 elif mode == "gan": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 0 elif mode == "rgan": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 0 elif mode == "full": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 2e-8 print( f"Trade-off params of img, adv, per, tv is: {self.img_to, self.adv_to, self.per_to, self.tv_to}" ) self.mode = mode def forward(self, fake_out, real_out, out_images, target_images, target_real): # Adversarial Loss adversarial_loss = ( nn.BCEWithLogitsLoss()(fake_out, target_real) if self.mode == "gan" else nn.BCEWithLogitsLoss()(fake_out - real_out, target_real) ) # Perception Loss a, b = self.loss_network(out_images, target_images) perception_loss = self.mse_loss(a, b) # Image Loss image_loss = self.mae_loss(out_images, target_images) # TV Loss tv_loss = TVLoss(out_images) return ( image_loss * self.img_to + adversarial_loss * self.adv_to + perception_loss * self.per_to + tv_loss * self.tv_to ) class VGG(nn.Module): def __init__(self): super(VGG, self).__init__() vgg_features = vgg19(pretrained=True).features modules = [m for m in vgg_features] self.vgg = nn.Sequential(*modules[:35]) # VGG 5_4 rgb_range = 255 vgg_mean = (0.485, 0.456, 0.406) vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range) self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std) self.vgg.requires_grad = False def forward(self, sr, hr): def _forward(x): x = self.sub_mean(x) x = self.vgg(x) return x vgg_sr = _forward(sr) with torch.no_grad(): vgg_hr = _forward(hr.detach()) return vgg_sr, vgg_hr def TVLoss(y): loss_var = torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) + torch.sum( torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :]) ) return loss_var # ## Settings NUM_BIT_QUANTIZED = 16 POSTFIX = ( "float32" if (NUM_BIT_QUANTIZED == 32) else "fp16" if (NUM_BIT_QUANTIZED == 16) else "int8" ) DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") UPSCALE_FACTOR = 4 RGB_RANGE = 255 MODEL_FILEPATH = "../input/models-pretrain/F2SRGAN/netG_4x_epoch45.pt" RESOLUTION = [144, 360, 480, 720, 1080] RGB_RANGE = 255.0 REPLICATION_EXP = 10 PRECISION = torch.float if (NUM_BIT_QUANTIZED == 32) else {torch.half, torch.float} POSTFIX = "float32" if (NUM_BIT_QUANTIZED == 32) else "fp16" MODEL_NAME = "F2SRGAN" FILE_QAT_NAME_OUTPUT = f"/kaggle/working/trained_{MODEL_NAME}_{POSTFIX}" FILE_QAT_NAME = f"../input/f2srgan-reference/trained_{MODEL_NAME}_{POSTFIX}" model_before_QAT = copy.deepcopy(Generator(chop=True, upscale_factor=UPSCALE_FACTOR)) model_before_QAT.load_state_dict(torch.load(MODEL_FILEPATH)["model"]) model_before_QAT.eval() # ## Code inference measure time def F2SRGAN_inference_time_check_JIT( model, input_shape=(1024, 1, 32, 32), dtype="fp32", nwarmup=50, nruns=500 ): input_data = torch.randn(input_shape) input_data = input_data.to("cuda") if dtype == "fp16": input_data = input_data.half() print("Warm up ...") with torch.no_grad(): for _ in range(nwarmup): features = model(input_data) torch.cuda.synchronize() print("Start timing ...") timings = [] with torch.no_grad(): for i in range(1, nruns + 1): start_time = time.time() output = model(input_data) torch.cuda.synchronize() end_time = time.time() timings.append(end_time - start_time) if i % 100 == 0: print( "Iteration %d/%d, avg batch time %.2f ms" % (i, nruns, np.mean(timings) * 1000) ) print("Input shape:", input_data.size()) 
print("Output shape:", output.shape) print("Average batch time: %.2f ms" % (np.mean(timings) * 1000)) # ### For resolution 144p, 360p, 480p, 720p, 1080p NO_JIT_NOW = True for resolution in RESOLUTION: if NO_JIT_NOW == True: with torch.no_grad(): jit_model = torch.jit.trace( model_before_QAT.cuda().half(), torch.rand(1, 3, resolution, resolution).to("cuda").half(), ) torch.jit.save(jit_model, FILE_QAT_NAME_OUTPUT + f"_{resolution}.jit.pt") qat_model = ( torch.jit.load(FILE_QAT_NAME_OUTPUT + f"_{resolution}.jit.pt") .eval() .to("cuda") ) else: qat_model = ( torch.jit.load(FILE_QAT_NAME + f"_{resolution}.jit.pt").eval().to("cuda") ) print( f"--------------------Resolution = {resolution} x {resolution}--------------------" ) F2SRGAN_inference_time_check_JIT( qat_model, input_shape=(1, 3, resolution, resolution), dtype=POSTFIX )
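The timing loop above wraps each forward pass in time.time() calls around torch.cuda.synchronize(), which works but folds Python overhead into every sample and only reports a mean. The sketch below (not part of the original kernel) times the same traced model with CUDA events and also reports percentiles; it assumes a traced fp16 model qat_model and a resolution value as used in the loop above.

import numpy as np
import torch

def time_with_cuda_events(model, input_shape, nwarmup=50, nruns=500):
    x = torch.randn(input_shape, device="cuda").half()
    with torch.no_grad():
        for _ in range(nwarmup):  # warm-up runs are not timed
            model(x)
        torch.cuda.synchronize()
        times_ms = []
        for _ in range(nruns):
            start = torch.cuda.Event(enable_timing=True)
            end = torch.cuda.Event(enable_timing=True)
            start.record()
            model(x)
            end.record()
            torch.cuda.synchronize()  # ensure both events have completed before reading them
            times_ms.append(start.elapsed_time(end))  # elapsed GPU time in milliseconds
    times_ms = np.array(times_ms)
    print(
        "mean %.2f ms, p50 %.2f ms, p95 %.2f ms"
        % (times_ms.mean(), np.percentile(times_ms, 50), np.percentile(times_ms, 95))
    )

# Example: time_with_cuda_events(qat_model, (1, 3, resolution, resolution))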
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/724/129724062.ipynb
sr-testing-data
hungvu22
[{"Id": 129724062, "ScriptId": 38577609, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12051758, "CreationDate": "05/16/2023 03:29:14", "VersionNumber": 1.0, "Title": "inference_time_F2SRGAN_No QAT_fp16+32", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 830.0, "LinesInsertedFromPrevious": 59.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 771.0, "LinesInsertedFromFork": 59.0, "LinesDeletedFromFork": 2.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 771.0, "TotalVotes": 0}]
[{"Id": 186064807, "KernelVersionId": 129724062, "SourceDatasetVersionId": 4965272}, {"Id": 186064809, "KernelVersionId": 129724062, "SourceDatasetVersionId": 5642896}]
[{"Id": 4965272, "DatasetId": 2879704, "DatasourceVersionId": 5033750, "CreatorUserId": 6239434, "LicenseName": "Unknown", "CreationDate": "02/08/2023 12:54:09", "VersionNumber": 1.0, "Title": "sr_testing_data", "Slug": "sr-testing-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2879704, "CreatorUserId": 6239434, "OwnerUserId": 6239434.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4965272.0, "CurrentDatasourceVersionId": 5033750.0, "ForumId": 2916194, "Type": 2, "CreationDate": "02/08/2023 12:54:09", "LastActivityDate": "02/08/2023", "TotalViews": 45, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 9}]
[{"Id": 6239434, "UserName": "hungvu22", "DisplayName": "V\u0169 Kh\u00e1nh H\u01b0ng", "RegisterDate": "11/25/2020", "PerformanceTier": 0}]
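For completeness, the traced fp16 models that the benchmark saves can be reloaded directly with TorchScript. A minimal sketch (not part of the original kernel), assuming the 4x F2SRGAN trace for 360x360 inputs exists at the path produced by FILE_QAT_NAME_OUTPUT above:

import torch

# Load the traced half-precision generator and run one forward pass
model = torch.jit.load("/kaggle/working/trained_F2SRGAN_fp16_360.jit.pt").eval().to("cuda")
lr = torch.rand(1, 3, 360, 360, device="cuda").half()
with torch.no_grad():
    sr = model(lr)
print(sr.shape)  # expected (1, 3, 1440, 1440) for the 4x upscale factor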
2e-8 elif mode == "per": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0, 1, 0 elif mode == "gan": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 0 elif mode == "rgan": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 0 elif mode == "full": self.img_to, self.adv_to, self.per_to, self.tv_to = 0, 0.6, 1, 2e-8 print( f"Trade-off params of img, adv, per, tv is: {self.img_to, self.adv_to, self.per_to, self.tv_to}" ) self.mode = mode def forward(self, fake_out, real_out, out_images, target_images, target_real): # Adversarial Loss adversarial_loss = ( nn.BCEWithLogitsLoss()(fake_out, target_real) if self.mode == "gan" else nn.BCEWithLogitsLoss()(fake_out - real_out, target_real) ) # Perception Loss a, b = self.loss_network(out_images, target_images) perception_loss = self.mse_loss(a, b) # Image Loss image_loss = self.mae_loss(out_images, target_images) # TV Loss tv_loss = TVLoss(out_images) return ( image_loss * self.img_to + adversarial_loss * self.adv_to + perception_loss * self.per_to + tv_loss * self.tv_to ) class VGG(nn.Module): def __init__(self): super(VGG, self).__init__() vgg_features = vgg19(pretrained=True).features modules = [m for m in vgg_features] self.vgg = nn.Sequential(*modules[:35]) # VGG 5_4 rgb_range = 255 vgg_mean = (0.485, 0.456, 0.406) vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range) self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std) self.vgg.requires_grad = False def forward(self, sr, hr): def _forward(x): x = self.sub_mean(x) x = self.vgg(x) return x vgg_sr = _forward(sr) with torch.no_grad(): vgg_hr = _forward(hr.detach()) return vgg_sr, vgg_hr def TVLoss(y): loss_var = torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) + torch.sum( torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :]) ) return loss_var # ## Settings NUM_BIT_QUANTIZED = 16 POSTFIX = ( "float32" if (NUM_BIT_QUANTIZED == 32) else "fp16" if (NUM_BIT_QUANTIZED == 16) else "int8" ) DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") UPSCALE_FACTOR = 4 RGB_RANGE = 255 MODEL_FILEPATH = "../input/models-pretrain/F2SRGAN/netG_4x_epoch45.pt" RESOLUTION = [144, 360, 480, 720, 1080] RGB_RANGE = 255.0 REPLICATION_EXP = 10 PRECISION = torch.float if (NUM_BIT_QUANTIZED == 32) else {torch.half, torch.float} POSTFIX = "float32" if (NUM_BIT_QUANTIZED == 32) else "fp16" MODEL_NAME = "F2SRGAN" FILE_QAT_NAME_OUTPUT = f"/kaggle/working/trained_{MODEL_NAME}_{POSTFIX}" FILE_QAT_NAME = f"../input/f2srgan-reference/trained_{MODEL_NAME}_{POSTFIX}" model_before_QAT = copy.deepcopy(Generator(chop=True, upscale_factor=UPSCALE_FACTOR)) model_before_QAT.load_state_dict(torch.load(MODEL_FILEPATH)["model"]) model_before_QAT.eval() # ## Code inference measure time def F2SRGAN_inference_time_check_JIT( model, input_shape=(1024, 1, 32, 32), dtype="fp32", nwarmup=50, nruns=500 ): input_data = torch.randn(input_shape) input_data = input_data.to("cuda") if dtype == "fp16": input_data = input_data.half() print("Warm up ...") with torch.no_grad(): for _ in range(nwarmup): features = model(input_data) torch.cuda.synchronize() print("Start timing ...") timings = [] with torch.no_grad(): for i in range(1, nruns + 1): start_time = time.time() output = model(input_data) torch.cuda.synchronize() end_time = time.time() timings.append(end_time - start_time) if i % 100 == 0: print( "Iteration %d/%d, avg batch time %.2f ms" % (i, nruns, np.mean(timings) * 1000) ) print("Input shape:", input_data.size()) print("Output shape:", output.shape) print("Average batch time: %.2f ms" % 
(np.mean(timings) * 1000)) # ### For resolution 144p, 360p, 480p, 720p, 1080p NO_JIT_NOW = True for resolution in RESOLUTION: if NO_JIT_NOW == True: with torch.no_grad(): jit_model = torch.jit.trace( model_before_QAT.cuda().half(), torch.rand(1, 3, resolution, resolution).to("cuda").half(), ) torch.jit.save(jit_model, FILE_QAT_NAME_OUTPUT + f"_{resolution}.jit.pt") qat_model = ( torch.jit.load(FILE_QAT_NAME_OUTPUT + f"_{resolution}.jit.pt") .eval() .to("cuda") ) else: qat_model = ( torch.jit.load(FILE_QAT_NAME + f"_{resolution}.jit.pt").eval().to("cuda") ) print( f"--------------------Resolution = {resolution} x {resolution}--------------------" ) F2SRGAN_inference_time_check_JIT( qat_model, input_shape=(1, 3, resolution, resolution), dtype=POSTFIX )
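The ComplexConv helper defined above builds a complex convolution from four real F.conv2d calls, following (a + bi)(c + di) = (ac - bd) + (ad + bc)i, and the FourierUnit only ever uses it with a 1x1 kernel. As a sanity check, this minimal sketch (not part of the notebook; shapes, names, and tolerances are illustrative) confirms that the decomposition matches a direct complex channel mix done with torch.einsum:

```python
# Sanity check (illustrative, not from the notebook): the four-real-conv decomposition
# used by ComplexConv matches a direct complex channel mix for a 1x1 kernel.
import torch
import torch.nn.functional as F

def complex_conv(x, weight):
    # (a + bi) * (c + di) = (ac - bd) + (ad + bc)i, each term computed as a real conv2d
    real = F.conv2d(x.real, weight.real) - F.conv2d(x.imag, weight.imag)
    imag = F.conv2d(x.real, weight.imag) + F.conv2d(x.imag, weight.real)
    return torch.complex(real, imag)

x = torch.randn(2, 4, 8, 5, dtype=torch.complex64)   # e.g. an rfftn output: (B, C, H, W//2 + 1)
w = torch.randn(6, 4, 1, 1, dtype=torch.complex64)   # (out_channels, in_channels, 1, 1)

y_conv = complex_conv(x, w)
y_mix = torch.einsum("oc,bchw->bohw", w[:, :, 0, 0], x)  # per-frequency channel mixing

print(y_conv.shape)                                  # torch.Size([2, 6, 8, 5])
print(torch.allclose(y_conv, y_mix, atol=1e-4))      # expected: True
```

Because the kernel is 1x1, the frequency-domain "convolution" is just a per-frequency linear mix of channels, which is what keeps the FourierUnit cheap while still giving it a global receptive field.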
false
0
11,148
0
11,171
11,148
129753665
<jupyter_start><jupyter_text>Cancer Data **570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant** **Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).** Kaggle dataset identifier: cancer-data <jupyter_script># # Import necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from yellowbrick.classifier import ConfusionMatrix # # Import dataset df = pd.read_csv("/kaggle/input/cancer-data/Cancer_Data.csv") df # #### Here we can see all the categorical and numerical data df.info() df.dtypes df.describe() # #### Drop the id and unnamed column because they are unique id df = df.drop(["id", "Unnamed: 32"], axis=1) # #### Checking the null values df.isnull().sum() # # Data analysis sns.countplot(x="diagnosis", data=df) df.diagnosis.value_counts(normalize=True) # #### Here we can see that about 62% is Benign cancer (B) and rest is Malignant cancer (M) def plot_histograms(data, columns): fig, axes = plt.subplots(nrows=len(columns), ncols=1, figsize=(20, 20)) for i, col in enumerate(columns): sns.histplot(data[col], kde=False, ax=axes[i]) axes[i].set_xlabel(col) plt.tight_layout() plt.show() # #### Here we can see the continous data of our dataset plot_columns = [ "radius_mean", "texture_mean", "perimeter_mean", "area_mean", "smoothness_mean", "compactness_mean", "concavity_mean", "concave points_mean", "symmetry_mean", "fractal_dimension_mean", ] plot_histograms(df, plot_columns) plot_columns = [ "radius_se", "texture_se", "perimeter_se", "area_se", "smoothness_se", "compactness_se", "concavity_se", "concave points_se", "symmetry_se", "fractal_dimension_se", ] plot_histograms(df, plot_columns) plot_columns = [ "radius_worst", "texture_worst", "perimeter_worst", "area_worst", "smoothness_worst", "compactness_worst", "concavity_worst", "concave points_worst", "symmetry_worst", "fractal_dimension_worst", ] plot_histograms(df, plot_columns) def box_plot(data, columns): fig, axes = plt.subplots(nrows=len(columns), ncols=1, figsize=(20, 25)) for i, col in enumerate(columns): sns.boxplot(data[col], ax=axes[i]) axes[i].set_xlabel(col) plt.tight_layout() plt.show() # #### Here we create boxplots to visualize the outliers plot_columns = [ "radius_mean", "texture_mean", "perimeter_mean", "area_mean", "smoothness_mean", "compactness_mean", "concavity_mean", "concave points_mean", "symmetry_mean", "fractal_dimension_mean", ] box_plot(df, plot_columns) plot_columns = [ "radius_se", "texture_se", "perimeter_se", "area_se", "smoothness_se", "compactness_se", "concavity_se", "concave points_se", "symmetry_se", "fractal_dimension_se", ] box_plot(df, plot_columns) plot_columns = [ "radius_worst", "texture_worst", "perimeter_worst", "area_worst", "smoothness_worst", "compactness_worst", "concavity_worst", "concave points_worst", "symmetry_worst", "fractal_dimension_worst", ] box_plot(df, plot_columns) # #### Here we visualize the dependent variables in the dataset. So I create correlation corr = df.corr().round(2) plt.figure(figsize=(25, 20)) sns.heatmap(corr, annot=True, cmap="crest") # # Data preprocessing # # Remove outliers df.shape # #### Removing the outliers with percentile method. 
Above the 20% data consider as outliers ## Remove the outliers def drop_outliers(data, feature): iqr = 1.5 * (np.percentile(data[feature], 80) - np.percentile(data[feature], 10)) data.drop( data[data[feature] > (iqr + np.percentile(data[feature], 80))].index, inplace=True, ) data.drop( data[data[feature] < (np.percentile(data[feature], 10) - iqr)].index, inplace=True, ) # select all numeric columns numeric_cols = df.select_dtypes(include=np.number).columns.tolist() # apply outlier removal to each numeric feature for feature in numeric_cols: drop_outliers(df, feature) df.shape # # Encode catagorical data df.head() label_encoder = LabelEncoder() df["diagnosis"] = label_encoder.fit_transform(df["diagnosis"]) df["diagnosis"].values X = df.drop("diagnosis", axis=1) X = X.values y = df["diagnosis"] # # Spliting the data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Normalization # #### Here I use standardScale to put the data into same scale scaler = StandardScaler() X_train_std = scaler.fit_transform(X_train) X_test_std = scaler.transform(X_test) # # Apply PCA # #### dimensionality reduction technique used to transform high-dimensional data into a lower-dimensional representation while preserving the essential information in the data. # #### In our dataset previously we have seen that there are many highly correlation variables. # ### Apply PCA on train data pca = PCA(n_components=X_train.shape[1]) pca.fit(X_train_std) # ### Determine the optimal number of components explained_variance = pca.explained_variance_ratio_ cumulative_variance = np.cumsum(explained_variance) n_components = np.argmax(cumulative_variance >= 0.95) + 1 print(f"selected num of components:{n_components}") # #### Transform the traing and test sets with selected n_components pca = PCA(n_components=n_components) X_train_pca = pca.fit_transform(X_train) X_test_pca = pca.transform(X_test_std) X_train_pca # # Model selection from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import BaggingClassifier from xgboost import XGBClassifier from sklearn.model_selection import RandomizedSearchCV, GridSearchCV # # Hyper-parameter tunning lr = LogisticRegression(random_state=42) knn = KNeighborsClassifier() dt = DecisionTreeClassifier() rf = RandomForestClassifier() ada = AdaBoostClassifier() xgb = XGBClassifier(eval_metric="logloss", use_label_encoder=False) # parameter for KNN para_knn = {"n_neighbors": np.arange(1, 50)} grid_knn = GridSearchCV(knn, param_grid=para_knn, cv=5) # parameter for decision tree para_dt = { "criterion": ["gini", "entropy"], "max_depth": np.arange(1, 50), "min_samples_leaf": [1, 2, 4, 5, 10, 20, 30, 40, 50, 80, 100], } grid_dt = GridSearchCV(dt, param_grid=para_dt, cv=5) # parameter for Random Forest params_rf = { "n_estimators": [100, 200, 350, 500], "min_samples_leaf": [2, 10, 30, 50, 80, 100], } grid_rf = GridSearchCV(rf, param_grid=params_rf, cv=5) # parameters for AdaBoost params_ada = { "n_estimators": [50, 100, 250, 400, 500], "learning_rate": [0.1, 0.001, 0.2, 0.5, 0.8, 1], } grid_ada = GridSearchCV(ada, param_grid=params_ada, cv=5) # paraameter for XGBoost params_xgb = { "n_estimators": [50, 100, 250, 600, 800, 1000], "learning_rate": [0.1, 0.001, 0.2, 0.5, 0.8, 1], } rs_xgb = RandomizedSearchCV(xgb, param_distributions=params_xgb, cv=5) 
grid_knn.fit(X_train, y_train) grid_dt.fit(X_train, y_train) grid_rf.fit(X_train, y_train) grid_ada.fit(X_train, y_train) rs_xgb.fit(X_train, y_train) print("Best parameters for KNN:", grid_knn.best_params_) print("Best parameters for Decision Tree:", grid_dt.best_params_) print("Best parameters for Random Forest:", grid_rf.best_params_) print("Best parameters for AdaBoost:", grid_ada.best_params_) print("Best parameters for XGBoost:", rs_xgb.best_params_) # ### Here we find the best parameter to train our model # ### Now apply this in some models lr = LogisticRegression(random_state=42) dt = DecisionTreeClassifier( criterion="gini", max_depth=29, min_samples_leaf=4, random_state=42 ) knn = KNeighborsClassifier(n_neighbors=9) rf = RandomForestClassifier(n_estimators=100, min_samples_leaf=2, random_state=42) ada = AdaBoostClassifier(n_estimators=100, learning_rate=1) xgb = XGBClassifier(n_estimators=100, learning_rate=0.1) classifiers = [ ("Logistic Regression", lr), ("K Nearest Neighbours", knn), ("Decision Tree", dt), ("Random Forest", rf), ("AdaBoost", ada), ("XGBoost", xgb), ] from sklearn.metrics import accuracy_score for classifier_name, classifier in classifiers: # Fit classifier to training set classifier.fit(X_train, y_train) # predict y_pred y_pred = classifier.predict(X_test) accuracy = accuracy_score(y_test, y_pred) # Evaluation the test set print("{:s} : {:.2f}".format(classifier_name, accuracy)) # ### We can see from the model predictions's that we get the best accuracy from XGBoost(98%).Linear Regressionand AdaBoost has 96% accuracy that is quite good. def print_classifier_reports(classifiers, X_train, y_train, X_test, y_test): for name, clf in classifiers: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print(f"Classification report for {name}:") print(classification_report(y_test, y_pred)) print_classifier_reports(classifiers, X_train, y_train, X_test, y_test) def print_confusion_matrix(classifiers, X_train, y_train, X_test, y_test): for name, clf in classifiers: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) cm = confusion_matrix(y_test, y_pred) ax = plt.subplot() sns.heatmap(cm, annot=True, ax=ax, cmap="Blues") ax.set_xlabel("Predicted") ax.set_ylabel("Actual") ax.set_title(name) plt.show() print_confusion_matrix(classifiers, X_train, y_train, X_test, y_test)
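The PCA cell above picks the smallest number of components whose cumulative explained variance reaches 95%. A self-contained sketch of that rule on synthetic data (the data, seed, and shapes are made up; only the 0.95 threshold and the argmax trick mirror the cell) looks like this:

```python
# Minimal sketch of the "keep components up to 95% cumulative variance" rule,
# run on made-up correlated data rather than the notebook's dataset.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10)) @ rng.normal(size=(10, 10))  # correlated synthetic features
X_std = StandardScaler().fit_transform(X)

pca_full = PCA().fit(X_std)                       # one component per feature by default
cumulative = np.cumsum(pca_full.explained_variance_ratio_)
n_components = int(np.argmax(cumulative >= 0.95)) + 1
print("components kept:", n_components, "variance covered:", round(cumulative[n_components - 1], 3))

X_reduced = PCA(n_components=n_components).fit_transform(X_std)
print(X_reduced.shape)                            # (200, n_components)
```

Standardizing before fitting matters because explained variance is otherwise dominated by whichever raw features happen to have the largest scale, so the sketch fits and transforms on the standardized matrix throughout.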
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/753/129753665.ipynb
cancer-data
erdemtaha
[{"Id": 129753665, "ScriptId": 38588012, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7521829, "CreationDate": "05/16/2023 08:28:27", "VersionNumber": 1.0, "Title": "Cancer test prediction with 98% accuracy", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 292.0, "LinesInsertedFromPrevious": 292.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 186107927, "KernelVersionId": 129753665, "SourceDatasetVersionId": 5212576}]
[{"Id": 5212576, "DatasetId": 3032092, "DatasourceVersionId": 5284991, "CreatorUserId": 2498226, "LicenseName": "Other (specified in description)", "CreationDate": "03/22/2023 07:57:00", "VersionNumber": 1.0, "Title": "Cancer Data", "Slug": "cancer-data", "Subtitle": "Benign and malignant cancer data", "Description": "**570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant**\n\n**Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3032092, "CreatorUserId": 2498226, "OwnerUserId": 2498226.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5212576.0, "CurrentDatasourceVersionId": 5284991.0, "ForumId": 3071494, "Type": 2, "CreationDate": "03/22/2023 07:57:00", "LastActivityDate": "03/22/2023", "TotalViews": 66608, "TotalDownloads": 11493, "TotalVotes": 209, "TotalKernels": 70}]
[{"Id": 2498226, "UserName": "erdemtaha", "DisplayName": "Erdem Taha", "RegisterDate": "11/15/2018", "PerformanceTier": 1}]
# # Import necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from yellowbrick.classifier import ConfusionMatrix # # Import dataset df = pd.read_csv("/kaggle/input/cancer-data/Cancer_Data.csv") df # #### Here we can see all the categorical and numerical data df.info() df.dtypes df.describe() # #### Drop the id and unnamed column because they are unique id df = df.drop(["id", "Unnamed: 32"], axis=1) # #### Checking the null values df.isnull().sum() # # Data analysis sns.countplot(x="diagnosis", data=df) df.diagnosis.value_counts(normalize=True) # #### Here we can see that about 62% is Benign cancer (B) and rest is Malignant cancer (M) def plot_histograms(data, columns): fig, axes = plt.subplots(nrows=len(columns), ncols=1, figsize=(20, 20)) for i, col in enumerate(columns): sns.histplot(data[col], kde=False, ax=axes[i]) axes[i].set_xlabel(col) plt.tight_layout() plt.show() # #### Here we can see the continous data of our dataset plot_columns = [ "radius_mean", "texture_mean", "perimeter_mean", "area_mean", "smoothness_mean", "compactness_mean", "concavity_mean", "concave points_mean", "symmetry_mean", "fractal_dimension_mean", ] plot_histograms(df, plot_columns) plot_columns = [ "radius_se", "texture_se", "perimeter_se", "area_se", "smoothness_se", "compactness_se", "concavity_se", "concave points_se", "symmetry_se", "fractal_dimension_se", ] plot_histograms(df, plot_columns) plot_columns = [ "radius_worst", "texture_worst", "perimeter_worst", "area_worst", "smoothness_worst", "compactness_worst", "concavity_worst", "concave points_worst", "symmetry_worst", "fractal_dimension_worst", ] plot_histograms(df, plot_columns) def box_plot(data, columns): fig, axes = plt.subplots(nrows=len(columns), ncols=1, figsize=(20, 25)) for i, col in enumerate(columns): sns.boxplot(data[col], ax=axes[i]) axes[i].set_xlabel(col) plt.tight_layout() plt.show() # #### Here we create boxplots to visualize the outliers plot_columns = [ "radius_mean", "texture_mean", "perimeter_mean", "area_mean", "smoothness_mean", "compactness_mean", "concavity_mean", "concave points_mean", "symmetry_mean", "fractal_dimension_mean", ] box_plot(df, plot_columns) plot_columns = [ "radius_se", "texture_se", "perimeter_se", "area_se", "smoothness_se", "compactness_se", "concavity_se", "concave points_se", "symmetry_se", "fractal_dimension_se", ] box_plot(df, plot_columns) plot_columns = [ "radius_worst", "texture_worst", "perimeter_worst", "area_worst", "smoothness_worst", "compactness_worst", "concavity_worst", "concave points_worst", "symmetry_worst", "fractal_dimension_worst", ] box_plot(df, plot_columns) # #### Here we visualize the dependent variables in the dataset. So I create correlation corr = df.corr().round(2) plt.figure(figsize=(25, 20)) sns.heatmap(corr, annot=True, cmap="crest") # # Data preprocessing # # Remove outliers df.shape # #### Removing the outliers with percentile method. 
Above the 20% data consider as outliers ## Remove the outliers def drop_outliers(data, feature): iqr = 1.5 * (np.percentile(data[feature], 80) - np.percentile(data[feature], 10)) data.drop( data[data[feature] > (iqr + np.percentile(data[feature], 80))].index, inplace=True, ) data.drop( data[data[feature] < (np.percentile(data[feature], 10) - iqr)].index, inplace=True, ) # select all numeric columns numeric_cols = df.select_dtypes(include=np.number).columns.tolist() # apply outlier removal to each numeric feature for feature in numeric_cols: drop_outliers(df, feature) df.shape # # Encode catagorical data df.head() label_encoder = LabelEncoder() df["diagnosis"] = label_encoder.fit_transform(df["diagnosis"]) df["diagnosis"].values X = df.drop("diagnosis", axis=1) X = X.values y = df["diagnosis"] # # Spliting the data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Normalization # #### Here I use standardScale to put the data into same scale scaler = StandardScaler() X_train_std = scaler.fit_transform(X_train) X_test_std = scaler.transform(X_test) # # Apply PCA # #### dimensionality reduction technique used to transform high-dimensional data into a lower-dimensional representation while preserving the essential information in the data. # #### In our dataset previously we have seen that there are many highly correlation variables. # ### Apply PCA on train data pca = PCA(n_components=X_train.shape[1]) pca.fit(X_train_std) # ### Determine the optimal number of components explained_variance = pca.explained_variance_ratio_ cumulative_variance = np.cumsum(explained_variance) n_components = np.argmax(cumulative_variance >= 0.95) + 1 print(f"selected num of components:{n_components}") # #### Transform the traing and test sets with selected n_components pca = PCA(n_components=n_components) X_train_pca = pca.fit_transform(X_train) X_test_pca = pca.transform(X_test_std) X_train_pca # # Model selection from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import BaggingClassifier from xgboost import XGBClassifier from sklearn.model_selection import RandomizedSearchCV, GridSearchCV # # Hyper-parameter tunning lr = LogisticRegression(random_state=42) knn = KNeighborsClassifier() dt = DecisionTreeClassifier() rf = RandomForestClassifier() ada = AdaBoostClassifier() xgb = XGBClassifier(eval_metric="logloss", use_label_encoder=False) # parameter for KNN para_knn = {"n_neighbors": np.arange(1, 50)} grid_knn = GridSearchCV(knn, param_grid=para_knn, cv=5) # parameter for decision tree para_dt = { "criterion": ["gini", "entropy"], "max_depth": np.arange(1, 50), "min_samples_leaf": [1, 2, 4, 5, 10, 20, 30, 40, 50, 80, 100], } grid_dt = GridSearchCV(dt, param_grid=para_dt, cv=5) # parameter for Random Forest params_rf = { "n_estimators": [100, 200, 350, 500], "min_samples_leaf": [2, 10, 30, 50, 80, 100], } grid_rf = GridSearchCV(rf, param_grid=params_rf, cv=5) # parameters for AdaBoost params_ada = { "n_estimators": [50, 100, 250, 400, 500], "learning_rate": [0.1, 0.001, 0.2, 0.5, 0.8, 1], } grid_ada = GridSearchCV(ada, param_grid=params_ada, cv=5) # paraameter for XGBoost params_xgb = { "n_estimators": [50, 100, 250, 600, 800, 1000], "learning_rate": [0.1, 0.001, 0.2, 0.5, 0.8, 1], } rs_xgb = RandomizedSearchCV(xgb, param_distributions=params_xgb, cv=5) 
grid_knn.fit(X_train, y_train) grid_dt.fit(X_train, y_train) grid_rf.fit(X_train, y_train) grid_ada.fit(X_train, y_train) rs_xgb.fit(X_train, y_train) print("Best parameters for KNN:", grid_knn.best_params_) print("Best parameters for Decision Tree:", grid_dt.best_params_) print("Best parameters for Random Forest:", grid_rf.best_params_) print("Best parameters for AdaBoost:", grid_ada.best_params_) print("Best parameters for XGBoost:", rs_xgb.best_params_) # ### Here we find the best parameter to train our model # ### Now apply this in some models lr = LogisticRegression(random_state=42) dt = DecisionTreeClassifier( criterion="gini", max_depth=29, min_samples_leaf=4, random_state=42 ) knn = KNeighborsClassifier(n_neighbors=9) rf = RandomForestClassifier(n_estimators=100, min_samples_leaf=2, random_state=42) ada = AdaBoostClassifier(n_estimators=100, learning_rate=1) xgb = XGBClassifier(n_estimators=100, learning_rate=0.1) classifiers = [ ("Logistic Regression", lr), ("K Nearest Neighbours", knn), ("Decision Tree", dt), ("Random Forest", rf), ("AdaBoost", ada), ("XGBoost", xgb), ] from sklearn.metrics import accuracy_score for classifier_name, classifier in classifiers: # Fit classifier to training set classifier.fit(X_train, y_train) # predict y_pred y_pred = classifier.predict(X_test) accuracy = accuracy_score(y_test, y_pred) # Evaluation the test set print("{:s} : {:.2f}".format(classifier_name, accuracy)) # ### We can see from the model predictions's that we get the best accuracy from XGBoost(98%).Linear Regressionand AdaBoost has 96% accuracy that is quite good. def print_classifier_reports(classifiers, X_train, y_train, X_test, y_test): for name, clf in classifiers: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print(f"Classification report for {name}:") print(classification_report(y_test, y_pred)) print_classifier_reports(classifiers, X_train, y_train, X_test, y_test) def print_confusion_matrix(classifiers, X_train, y_train, X_test, y_test): for name, clf in classifiers: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) cm = confusion_matrix(y_test, y_pred) ax = plt.subplot() sns.heatmap(cm, annot=True, ax=ax, cmap="Blues") ax.set_xlabel("Predicted") ax.set_ylabel("Actual") ax.set_title(name) plt.show() print_confusion_matrix(classifiers, X_train, y_train, X_test, y_test)
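For reference, drop_outliers in the script above uses a widened percentile rule rather than the classic quartile IQR: the spread is 1.5 * (P80 - P10), and rows outside [P10 - spread, P80 + spread] are removed. A minimal sketch of that rule on one synthetic column (values, seed, and the two planted outliers are illustrative, not from Cancer_Data.csv):

```python
# Hedged sketch of the widened-percentile rule used by drop_outliers, applied to
# a single synthetic column with two planted extreme values.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
s = pd.Series(np.concatenate([rng.normal(50, 5, size=500), [120.0, -40.0]]))

p10, p80 = np.percentile(s, 10), np.percentile(s, 80)
spread = 1.5 * (p80 - p10)                       # the script's iqr-style spread
keep = (s >= p10 - spread) & (s <= p80 + spread)

print(len(s), "->", int(keep.sum()))             # the two planted extremes should be dropped
print(round(s[keep].min(), 1), round(s[keep].max(), 1))
```

Applying the same filter to every numeric column in a loop, as the script does, compounds across columns and can shrink the dataset noticeably, which is why df.shape is printed before and after.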
false
1
3,202
5
3,292
3,202
129753581
<jupyter_start><jupyter_text>Big Data Certification KR # 빅데이터 분석기사 실기 준비를 위한 캐글 놀이터 안녕하세요 **퇴근후딴짓** 입니다🤗 빅데이터 분석기사 실기 준비를 위한 데이터 셋과 튜토리얼을 공유합니다. 입문자라서 이 자료를 보고 시작할 수 없다면 저의 [퇴근후딴짓 유튜브](https://youtube.com/playlist?list=PLSlDi2AkDv82Qv7B3WiWypQSFmOCb-G_-) 또는 [입문자를 위한 실기 강의](https://inf.run/HYmN)를 추천합니다. 더 좋은 코드를 만든다면 많은 공유 부탁드려요🎉 (Python과 R모두 환영합니다.) 해당 자료가 성장(학습)에 도움이 되었다면 [링크](https://www.kaggle.com/datasets/agileteam/bigdatacertificationkr/discussion/369118)를 통해 후기(피드백)를 부탁해요 ✏️ ## 작업형3 예시문제 - 대응(쌍체)표본 T검정: [파이썬 링크](https://www.kaggle.com/agileteam/t3-example), [R 링크](https://www.kaggle.com/agileteam/t3-example-r) ```풀이 영상: ``` 🖥️ [작업형3](https://bit.ly/3HnIBN7) - 독립표본 T검정: [파이썬 링크](https://www.kaggle.com/agileteam/t3-ttest-ind), [R 링크](https://www.kaggle.com/agileteam/t3-ttest-ind-r) - 단일표본 T검정: [파이썬 링크](https://www.kaggle.com/agileteam/t3-ttest-1samp), [R 링크](https://www.kaggle.com/agileteam/t3-ttest-1samp-r) - 일원배치법: [파이썬 링크](https://www.kaggle.com/agileteam/t3-anova), [R 링크](https://www.kaggle.com/agileteam/t3-anova-r) - 정규성 검정 Shapiro-Wilks [파이썬 링크](https://www.kaggle.com/agileteam/t3-shapiro-wilk), [R 링크] (https://www.kaggle.com/agileteam/t3-shapiro-wilk-r) - 회귀모형(상관계수): [파이썬 링크](https://www.kaggle.com/agileteam/t3-correlation), [R 링크](https://www.kaggle.com/agileteam/t3-correlation-r) ## 5회 기출 유형 ```풀이 영상: ``` 🖥️ [작업형2](https://youtu.be/2n1nFbNf_5g) - 작업형2 유형 데이터 및 컴피티션: [링크] (https://www.kaggle.com/competitions/big-data-analytics-certification-kr-2023-5th/) ## 4회 기출 유형 ```풀이 영상: ``` 🖥️ [작업형1](https://youtu.be/XAT0qvN5tnA), 🖥️ [작업형2](https://youtu.be/diP0q1YzVFg) - 작업형1 유형: [파이썬 링크](https://www.kaggle.com/agileteam/4th-type1-python), [R 링크](https://www.kaggle.com/code/yimstar9/4th-type1) - 작업형2 유형: [풀이](https://www.kaggle.com/agileteam/4th-t2-python) - 작업형2: [컴피티션/데이터 링크](https://www.kaggle.com/competitions/big-data-analytics-certification-kr-2022) -&gt; 작업형2 데이터셋 제공, 최종 답안 제출시 점수 확인 가능 - 필답형: [가답안 링크](https://www.kaggle.com/datasets/agileteam/bigdatacertificationkr/discussion/361256) - 시험환경에서 작업형 풀어보기 📝[작업형1-1](https://level.goorm.io/exam/164115/%ED%8C%90%EB%8B%A4%EC%8A%A4/quiz/1), 📝[작업형1-2](https://level.goorm.io/exam/164118/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EB%B9%84%EC%9C%A8-%EC%A1%B0%EA%B1%B4-%EC%84%A0%ED%83%9D/quiz/1), 📝[작업형1-3](https://level.goorm.io/exam/164119/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EC%8B%9C%EA%B3%84%EC%97%B4-%EC%A1%B0%EA%B1%B4/quiz/1) ## 3회 기출 유형 ```풀이 영상: ``` 🖥️ [작업형1,2](https://youtu.be/QpNufh_ZV7A) - 작업형1 유형: [문제1](https://www.kaggle.com/code/agileteam/3rd-type1-1-3-1-1), [문제2](https://www.kaggle.com/code/agileteam/3rd-type1-2-3-1-2), [문제3](https://www.kaggle.com/code/agileteam/3rd-type1-3-3-1-3) - 작업형2 유형: [풀이 링크](https://www.kaggle.com/code/agileteam/3rd-type2-3-2-baseline) - 작업형2: [컴피티션/데이터 링크](https://www.kaggle.com/competitions/big-data-analytics-certification) -&gt; 작업형2 데이터셋 제공, 최종 답안 제출시 점수 확인 가능 ## 2회 기출 유형 ```풀이 영상: ``` 🖥️ [작업형1](https://youtu.be/Jh3rJaZlEg0) - 작업형1: [파이썬 링크](https://www.kaggle.com/agileteam/tutorial-t1-2-python), [R 링크](https://www.kaggle.com/limmyoungjin/tutorial-t1-2-r-2) - 작업형2 유형: [파이썬 링크](https://www.kaggle.com/agileteam/tutorial-t2-2-python), [R 링크](https://www.kaggle.com/limmyoungjin/tutorial-t2-2-r) 📌 **위 N회 문제는 기출 문제가 아닌 "기출 유형"임을 꼭 참고해주세요!** ## 공식 예시 문제 ```풀이 영상: ``` 🖥️ [작업형1](https://youtu.be/E86QFVXPm5Q), 🖥️ [작업형2](https://youtu.be/_GIBVt5-khk) - 작업형1: P: https://www.kaggle.com/agileteam/tutorial-t1-python R: https://www.kaggle.com/limmyoungjin/tutorial-t1-r - 작업형2: 백화점고객의 1년간 데이터 (dataq 공식 예제) P: 
https://www.kaggle.com/agileteam/t2-exercise-tutorial-baseline ## 📢 응시 환경(공식 예시 문제 환경) https://goor.me/EvH8T ------------------------------------------ ### 🎁 [인프런] 빅데이터 분식기사 실기 입문 강의 🎁 - https://inf.run/XnzT - 입문자를 위한 강의를 -&gt; **인프런**으로 이전 했어요(클래스101 계약 종료) 👍 - 파이썬-판다스-머신러닝-모의문제-꿀팁 등을 실기 준비에 필요한 내용만 친절하게 알려드려요🎉 - 🆕 작업형3 유형 콘텐츠 추가 ------------------------------------------ ### 📌 빅분기(실기) 관련 영상 모음: - https://youtube.com/playlist?list=PLSlDi2AkDv82Qv7B3WiWypQSFmOCb-G_- ## 📌 작업형1 예상문제 (P:파이썬, R) **Tasks 탭에서 문제 및 코드 확인** - T1-1.Outlier(IQR) / #이상치 #IQR P: https://www.kaggle.com/agileteam/py-t1-1-iqr-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-1-iqr-expected-questions-2 - T1-2.Outlier(age) / #이상치 #소수점나이 P: https://www.kaggle.com/agileteam/py-t1-2-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-2-expected-questions-2 - T1-3. Missing data / #결측치 #삭제 #중앙 #평균 P: https://www.kaggle.com/agileteam/py-t1-3-map-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-3-expected-questions-2 - T1-4. Skewness and Kurtosis (Log Scale) / #왜도 #첨도 #로그스케일 P: https://www.kaggle.com/agileteam/py-t1-4-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-4-expected-questions-2 - T1-5. Standard deviation / #표준편차 P: https://www.kaggle.com/agileteam/py-t1-5-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-5-expected-questions-2 - T1-6. Groupby Sum / #결측치 #조건 P: https://www.kaggle.com/agileteam/py-t1-6-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-6-expected-questions-2 - T1-7. Replace / #값변경 #조건 #최대값 P: https://www.kaggle.com/agileteam/py-t1-7-2-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-7-2-expected-questions-2 - T1-8. Cumulative Sum / #누적합 #결측치 #보간 P: https://www.kaggle.com/agileteam/py-t1-8-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-8-expected-questions-2 - T1-9. Standardization / #표준화 #중앙값 P: https://www.kaggle.com/agileteam/py-t1-9-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-9-expected-questions-2 - T1-10. Yeo-Johnson and Box–Cox / #여존슨 #박스-콕스 #결측치 #최빈값 (출제 가능성 낮음) P: https://www.kaggle.com/agileteam/py-t1-10-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-10-expected-questions-2 - T1-11. min-max scaling / #스케일링 #상하위값 P: https://www.kaggle.com/agileteam/py-t1-11-min-max-5-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-11-min-max-5-expected-questions-2 - T1-12. top10-bottom10 / #그룹핑 #정렬 #상하위값 P: https://www.kaggle.com/agileteam/py-t1-12-10-10-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-12-10-expected-questions-2 - T1-13. Correlation / #상관관계 P: https://www.kaggle.com/agileteam/py-t1-13-expected-questions R: https://www.kaggle.com/limmyoungjin/r-t1-13-expected-questions-2 - T1-14. Multi Index & Groupby / #멀티인덱스 #정렬 #인덱스리셋 #상위값 P: https://www.kaggle.com/agileteam/py-t1-14-2-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-14-2-expected-question-2 - T1-15. Slicing & Condition / #슬라이싱 #결측치 #중앙값 #조건 P: https://www.kaggle.com/agileteam/py-t1-15-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-15-expected-question-2 - T1-16. Variance / #분산 #결측치전후값차이 P: https://www.kaggle.com/agileteam/py-t1-16-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-16-expected-question-2 - T1-17. Time-Series1 / #시계열데이터 #datetime P: https://www.kaggle.com/agileteam/py-t1-17-1-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-17-1-expected-question-2 - T1-18. 
Time-Series2 / #주말 #평일 #비교 #시계열데이터 P: https://www.kaggle.com/agileteam/py-t1-18-2-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-18-2-expected-question-2 - T1-19. Time-Series3 (monthly total) / #월별 #총계 #비교 #데이터값변경 P: https://www.kaggle.com/agileteam/py-t1-19-3-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-19-3-expected-question-2 - T1-20. Combining Data / 데이터 #병합 #결합 / 고객과 궁합이 맞는 타입 매칭 P: https://www.kaggle.com/agileteam/py-t1-20-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-20-expected-question-2 - T1-21. Binning Data / #비닝 #구간나누기 P: https://www.kaggle.com/agileteam/py-t1-21-expected-question R: https://www.kaggle.com/limmyoungjin/r-t1-21-expected-question-2 - T1-22. Time-Series4 (Weekly data) / #주간 #합계 P: https://www.kaggle.com/agileteam/t1-22-time-series4-weekly-data R: https://www.kaggle.com/limmyoungjin/r-t1-22-time-series4-weekly-data-2 - T1-23. Drop Duplicates / #중복제거 #결측치 #10번째값으로채움 P: https://www.kaggle.com/agileteam/t1-23-drop-duplicates R: https://www.kaggle.com/limmyoungjin/r-t1-23-drop-duplicates-2 - T1-24. Time-Series5 (Lagged Feature) / #시차데이터 #조건 P: https://www.kaggle.com/agileteam/t1-24-time-series5-lagged-feature R: https://www.kaggle.com/limmyoungjin/r-t1-24-time-series5-2 [심화 학습] 🆕 - T1-25. String / #키워드 찾기 #문자 #슬라이싱 P: https://www.kaggle.com/agileteam/t1-25-str-slicing - T1-26. String / #키워드 찾기 #문자 #포함여부 P: https://www.kaggle.com/agileteam/t1-26-str-contains - T1-27. String / #키워드 찾기 #문자 #공백 P: https://www.kaggle.com/agileteam/t1-27-str-contains-replace - T1-28. Frequency / #빈도 #index P: https://www.kaggle.com/agileteam/t1-28-value-counts-index - T1-29. Time-Series (format) # 날짜 형식 P: https://www.kaggle.com/agileteam/t1-29-datetime-format - T1-30. Time-Series #percent P: https://www.kaggle.com/agileteam/t1-30-datetime-percent - T1-31. Melt #재구조화(전체) P: https://www.kaggle.com/agileteam/t1-31-melt - T1-32. Melt #재구조화(일부) P: https://www.kaggle.com/agileteam/t1-33-melt2 - T1-91. Sigmoid / #시그모이드 P: https://www.kaggle.com/agileteam/t1-91-sigmoid - T1-92. ReLU/ #렐루 P: https://www.kaggle.com/agileteam/t1-92-relu ## 📌 작업형2 예상문제 **Tasks 탭에서 문제 및 코드 확인** - T2-1. Titanic (Classification) / 타이타닉 P: https://www.kaggle.com/agileteam/t2-1-titanic-simple-baseline R: https://www.kaggle.com/limmyoungjin/r-t2-1-titanic - T2-2. Pima Indians Diabetes (Classification) / 당뇨병 P: https://www.kaggle.com/agileteam/t2-2-pima-indians-diabetes R: https://www.kaggle.com/limmyoungjin/r-t2-2-pima-indians-diabetes - T2-3. Adult Census Income (Classification) / 성인 인구소득 예측 P: https://www.kaggle.com/agileteam/t2-3-adult-census-income-tutorial R: https://www.kaggle.com/limmyoungjin/r-t2-3-adult-census-income - T2-4. House Prices (Regression) / 집값 예측 / RMSE P: https://www.kaggle.com/code/agileteam/t2-4-house-prices-regression P: https://www.kaggle.com/code/agileteam/house-prices-starter-xgb (XGB 시작하기) R: https://www.kaggle.com/limmyoungjin/r-t2-4-house-prices - T2-5. Insurance Forecast (Regression) / P: https://www.kaggle.com/agileteam/insurance-starter-tutorial R: https://www.kaggle.com/limmyoungjin/r-t2-5-insurance-prediction - T2-6. 
Bike-sharing-demand (Regression) / 자전거 수요 예측 / RMSLE P: https://www.kaggle.com/code/agileteam/t2-6-bike-regressor R: https://www.kaggle.com/limmyoungjin/r-t2-6-bike-sharing-demand ## 📌 모의고사 ### 모의고사1 - 작업형1: [파이썬 링크](https://www.kaggle.com/agileteam/mock-exam1-type1-1-tutorial), [R 링크](https://www.kaggle.com/limmyoungjin/mock-exam1-type1-1) - 작업형2: [문제 템플릿](https://www.kaggle.com/agileteam/mock-exam-t2-exam-template), [찐 입문 코드](https://www.kaggle.com/agileteam/mock-exam-t2-starter-tutorial), [풀이 코드/베이스라인](https://www.kaggle.com/agileteam/mock-exam-t2-baseline-tutorial) ### 모의고사2 (시험환경) - 작업형1: [링크](https://www.kaggle.com/code/agileteam/mock-exam2-type1-1-2) (시험환경) - 작업형2(회귀): [링크](https://www.kaggle.com/code/agileteam/t2-2-2-baseline-r2) ### 📌 입문자를 위한 머신러닝 튜토리얼 (공유해주신 노트북 중 선정하였음👍) - https://www.kaggle.com/ohseokkim/t2-2-pima-indians-diabetes 작성자: @ohseokkim 😆 - https://www.kaggle.com/wltjd54/insurance-prediction-full-ver 작성자: @wltjd54 👏 ### 📌 시험 전 꼭!! 봐야 할 내용 - 시험환경에서 정당한 컨닝페이퍼 만들기 Guide https://www.kaggle.com/agileteam/tip-guide - 구름 환경에서 작업형1 실전 연습하기 (외부데이터) - 파이썬 https://www.kaggle.com/agileteam/tip-how-to-use-ide - R : https://www.kaggle.com/limmyoungjin/tip-how-to-use-ide - 판다스 통계 함수 간단 정리 https://www.kaggle.com/agileteam/pandas-statistical-function - json, xml 파일 로드 https://www.kaggle.com/agileteam/tip-data-load-json-and-xml ##👋 Code👋 - 활용방법 : 노트북(코드) 클릭 후 우측 상단에 **'copy & edit'** 하면 사용한 데이터 셋과 함께 노트북이 열려요!! - 예시 문제 및 기출 유형 Tutorial - 모의문제 출제 및 풀이 ("kim tae heon" 검색) - 작업형1 : 'T1' 을 검색해주세요! - 작업형2 : 'T2'를 검색해주세요! ## 🦑 실기 준비 스터디 (오징어 게임) 🦑 - 시험 5주 전 멤버 모집 - 시험 4주 전부터 집중 학습 ## 📢 기초 학습 자료 ### 파이썬, 판다스, 머신러닝 / 퇴근후딴짓 - 시험 합격용 속성 기초 강의(유료): https://inf.run/XnzT ### 📌파이썬 / 테디노트 - 파이썬 입문 강의(무료) : https://youtu.be/dpwTOQri42s - 파이썬 전자책(무료) : https://wikidocs.net/book/6708 ### 📌판다스 / 테디노트 - 판다스 입문강의(유료) : https://www.udemy.com/course/pandas-i/ - 판다스 전자책(무료) : https://wikidocs.net/book/4639 ### 오징어게임(스터디 멤버)모집(디스코드): - 6회 준비: (마감) - 7회 준비: (모집전) 함께 공부하며 성장했으면 해요!!!:) 이 자료가 도움이 되었다면 upvote 클릭 부탁드립니다 🙏 ### [안내] -&gt; 책으로 출간할 예정입니다! - 링크가 아닌 복사로 동의 없이 사용 금지 - 본 자료에 대한 허가되지 않은 배포 금지 파 Kaggle dataset identifier: bigdatacertificationkr <jupyter_script># ## 어떤 특정 약물을 복용한 사람들의 평균 체온이 복용하지 않은 사람들의 평균 체온과 유의미하게 다른지 검정해보려고 합니다. # 가정: # - 약물을 복용한 그룹과 복용하지 않은 그룹의 체온 데이터가 각각 주어져 있다고 가정합니다. # - 각 그룹의 체온은 정규분포를 따른다고 가정합니다. # ## 검정통계량, p-value, 검정결과를 출력하시오 # H0: 약물을 복용한 그룹과 복용하지 않은 그룹의 평균 체온은 유의미한 차이가 없다. # H1: 약물을 복용한 그룹과 복용하지 않은 그룹의 평균 체온은 유의미한 차이가 있다. # ttest_ind vs ttest_rel # stats.ttest_ind(A,B) : Calculate the T-test for the means of two independent samples of scores. # : 독립적인 두 집단에 대한 t-test를 하는 경우 ttest_ind(independent) 사용 # stats.ttest_rel(A,B) : Calculate the t-test on TWO RELATED samples of scores, a and b. # : 전,후 비교와 같은 동일 집단에 대한 t-test를 하는 경우 ttest_rel(relative) from scipy import stats group1 = [36.8, 36.7, 37.1, 36.9, 37.2, 36.8, 36.9, 37.1, 36.7, 37.1] group2 = [36.5, 36.6, 36.3, 36.6, 36.9, 36.7, 36.7, 36.8, 36.5, 36.7] result1 = stats.ttest_ind( group1, group2 ) # alternative는 '같다/다르다에 대한 비교'를 하는 것이므로 alternative에 대한 설정을 따로 하지 않는다 result2 = stats.ttest_ind( group2, group1 ) # 유의미한 차이가 있다 = 두 그룹이 같다/다르다 -> alternative의 default값임 print(result1) # 같다/다르다에 대한 비교를 할 시엔 group1, group2의 입력 순서 상관 x print(result2) result = result1 # result1 = result2를 확인했으므로 result로 결과를 추출함. 
print("검정통계량:", result.statistic) print("p-value:", result.pvalue) alpha = 0.05 if alpha > result1.pvalue: print("가설검정 : p-value가 유의수준 0.05보다 작으므로 귀무가설을 기각하고 대립가설을 채택한다.") print("결론 : 약물을 복용한 그룹과 복용하지 않은 그룹의 평균 체온은 유의미한 차이가 있다.") else: print("가설검정 : p-value가 유의수준 0.05보다 크므로 대립가설을 기각하고 귀무가설을 채택한다.") print("결론 : 약물을 복용한 그룹과 복용하지 않은 그룹의 평균 체온은 유의미한 차이가 없다.")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/753/129753581.ipynb
bigdatacertificationkr
agileteam
[{"Id": 129753581, "ScriptId": 38587596, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12411964, "CreationDate": "05/16/2023 08:27:48", "VersionNumber": 2.0, "Title": "T2-ttest_ind", "EvaluationDate": "05/16/2023", "IsChange": false, "TotalLines": 46.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 46.0, "LinesInsertedFromFork": 33.0, "LinesDeletedFromFork": 20.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 13.0, "TotalVotes": 0}]
[{"Id": 186107840, "KernelVersionId": 129753581, "SourceDatasetVersionId": 5684653}]
[{"Id": 5684653, "DatasetId": 1633303, "DatasourceVersionId": 5760226, "CreatorUserId": 2487225, "LicenseName": "Attribution-NoDerivatives 4.0 International (CC BY-ND 4.0)", "CreationDate": "05/14/2023 18:06:03", "VersionNumber": 10.0, "Title": "Big Data Certification KR", "Slug": "bigdatacertificationkr", "Subtitle": "\ube45\ub370\uc774\ud130 \ubd84\uc11d\uae30\uc0ac \uc2e4\uae30 (Python, R tutorial code)", "Description": "# \ube45\ub370\uc774\ud130 \ubd84\uc11d\uae30\uc0ac \uc2e4\uae30 \uc900\ube44\ub97c \uc704\ud55c \uce90\uae00 \ub180\uc774\ud130 \n\uc548\ub155\ud558\uc138\uc694 **\ud1f4\uadfc\ud6c4\ub534\uc9d3** \uc785\ub2c8\ub2e4\ud83e\udd17\n\ube45\ub370\uc774\ud130 \ubd84\uc11d\uae30\uc0ac \uc2e4\uae30 \uc900\ube44\ub97c \uc704\ud55c \ub370\uc774\ud130 \uc14b\uacfc \ud29c\ud1a0\ub9ac\uc5bc\uc744 \uacf5\uc720\ud569\ub2c8\ub2e4.\n\uc785\ubb38\uc790\ub77c\uc11c \uc774 \uc790\ub8cc\ub97c \ubcf4\uace0 \uc2dc\uc791\ud560 \uc218 \uc5c6\ub2e4\uba74 \uc800\uc758 [\ud1f4\uadfc\ud6c4\ub534\uc9d3 \uc720\ud29c\ube0c](https://youtube.com/playlist?list=PLSlDi2AkDv82Qv7B3WiWypQSFmOCb-G_-) \ub610\ub294 [\uc785\ubb38\uc790\ub97c \uc704\ud55c \uc2e4\uae30 \uac15\uc758](https://inf.run/HYmN)\ub97c \ucd94\ucc9c\ud569\ub2c8\ub2e4. \n\ub354 \uc88b\uc740 \ucf54\ub4dc\ub97c \ub9cc\ub4e0\ub2e4\uba74 \ub9ce\uc740 \uacf5\uc720 \ubd80\ud0c1\ub4dc\ub824\uc694\ud83c\udf89 (Python\uacfc R\ubaa8\ub450 \ud658\uc601\ud569\ub2c8\ub2e4.)\n\n\ud574\ub2f9 \uc790\ub8cc\uac00 \uc131\uc7a5(\ud559\uc2b5)\uc5d0 \ub3c4\uc6c0\uc774 \ub418\uc5c8\ub2e4\uba74 [\ub9c1\ud06c](https://www.kaggle.com/datasets/agileteam/bigdatacertificationkr/discussion/369118)\ub97c \ud1b5\ud574 \ud6c4\uae30(\ud53c\ub4dc\ubc31)\ub97c \ubd80\ud0c1\ud574\uc694 \u270f\ufe0f\n\n## \uc791\uc5c5\ud6153 \uc608\uc2dc\ubb38\uc81c \n- \ub300\uc751(\uc30d\uccb4)\ud45c\ubcf8 T\uac80\uc815: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-example), [R \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-example-r) ```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6153](https://bit.ly/3HnIBN7)\n- \ub3c5\ub9bd\ud45c\ubcf8 T\uac80\uc815: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-ttest-ind), [R \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-ttest-ind-r) \n- \ub2e8\uc77c\ud45c\ubcf8 T\uac80\uc815: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-ttest-1samp), [R \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-ttest-1samp-r)\n- \uc77c\uc6d0\ubc30\uce58\ubc95: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-anova), [R \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-anova-r)\n\n- \uc815\uaddc\uc131 \uac80\uc815 Shapiro-Wilks [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-shapiro-wilk), [R \ub9c1\ud06c] (https://www.kaggle.com/agileteam/t3-shapiro-wilk-r)\n\n- \ud68c\uadc0\ubaa8\ud615(\uc0c1\uad00\uacc4\uc218): [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-correlation), [R \ub9c1\ud06c](https://www.kaggle.com/agileteam/t3-correlation-r)\n\n## 5\ud68c \uae30\ucd9c \uc720\ud615\n```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6152](https://youtu.be/2n1nFbNf_5g) \n- \uc791\uc5c5\ud6152 \uc720\ud615 \ub370\uc774\ud130 \ubc0f \ucef4\ud53c\ud2f0\uc158: [\ub9c1\ud06c] (https://www.kaggle.com/competitions/big-data-analytics-certification-kr-2023-5th/) \n\n## 4\ud68c \uae30\ucd9c \uc720\ud615\n```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6151](https://youtu.be/XAT0qvN5tnA), 
\ud83d\udda5\ufe0f [\uc791\uc5c5\ud6152](https://youtu.be/diP0q1YzVFg) \n- \uc791\uc5c5\ud6151 \uc720\ud615: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/4th-type1-python), [R \ub9c1\ud06c](https://www.kaggle.com/code/yimstar9/4th-type1)\n- \uc791\uc5c5\ud6152 \uc720\ud615: [\ud480\uc774](https://www.kaggle.com/agileteam/4th-t2-python)\n- \uc791\uc5c5\ud6152: [\ucef4\ud53c\ud2f0\uc158/\ub370\uc774\ud130 \ub9c1\ud06c](https://www.kaggle.com/competitions/big-data-analytics-certification-kr-2022) -&gt; \uc791\uc5c5\ud6152 \ub370\uc774\ud130\uc14b \uc81c\uacf5, \ucd5c\uc885 \ub2f5\uc548 \uc81c\ucd9c\uc2dc \uc810\uc218 \ud655\uc778 \uac00\ub2a5\n- \ud544\ub2f5\ud615: [\uac00\ub2f5\uc548 \ub9c1\ud06c](https://www.kaggle.com/datasets/agileteam/bigdatacertificationkr/discussion/361256)\n- \uc2dc\ud5d8\ud658\uacbd\uc5d0\uc11c \uc791\uc5c5\ud615 \ud480\uc5b4\ubcf4\uae30 \n\ud83d\udcdd[\uc791\uc5c5\ud6151-1](https://level.goorm.io/exam/164115/%ED%8C%90%EB%8B%A4%EC%8A%A4/quiz/1), \ud83d\udcdd[\uc791\uc5c5\ud6151-2](https://level.goorm.io/exam/164118/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EB%B9%84%EC%9C%A8-%EC%A1%B0%EA%B1%B4-%EC%84%A0%ED%83%9D/quiz/1), \ud83d\udcdd[\uc791\uc5c5\ud6151-3](https://level.goorm.io/exam/164119/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EC%8B%9C%EA%B3%84%EC%97%B4-%EC%A1%B0%EA%B1%B4/quiz/1)\n\n## 3\ud68c \uae30\ucd9c \uc720\ud615\n```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6151,2](https://youtu.be/QpNufh_ZV7A)\n- \uc791\uc5c5\ud6151 \uc720\ud615: [\ubb38\uc81c1](https://www.kaggle.com/code/agileteam/3rd-type1-1-3-1-1), [\ubb38\uc81c2](https://www.kaggle.com/code/agileteam/3rd-type1-2-3-1-2), [\ubb38\uc81c3](https://www.kaggle.com/code/agileteam/3rd-type1-3-3-1-3)\n- \uc791\uc5c5\ud6152 \uc720\ud615: [\ud480\uc774 \ub9c1\ud06c](https://www.kaggle.com/code/agileteam/3rd-type2-3-2-baseline)\n- \uc791\uc5c5\ud6152: [\ucef4\ud53c\ud2f0\uc158/\ub370\uc774\ud130 \ub9c1\ud06c](https://www.kaggle.com/competitions/big-data-analytics-certification) -&gt; \uc791\uc5c5\ud6152 \ub370\uc774\ud130\uc14b \uc81c\uacf5, \ucd5c\uc885 \ub2f5\uc548 \uc81c\ucd9c\uc2dc \uc810\uc218 \ud655\uc778 \uac00\ub2a5\n\n## 2\ud68c \uae30\ucd9c \uc720\ud615\n```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6151](https://youtu.be/Jh3rJaZlEg0)\n- \uc791\uc5c5\ud6151: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/tutorial-t1-2-python), [R \ub9c1\ud06c](https://www.kaggle.com/limmyoungjin/tutorial-t1-2-r-2)\n- \uc791\uc5c5\ud6152 \uc720\ud615: [\ud30c\uc774\uc36c \ub9c1\ud06c](https://www.kaggle.com/agileteam/tutorial-t2-2-python), [R \ub9c1\ud06c](https://www.kaggle.com/limmyoungjin/tutorial-t2-2-r)\n\n\ud83d\udccc **\uc704 N\ud68c \ubb38\uc81c\ub294 \uae30\ucd9c \ubb38\uc81c\uac00 \uc544\ub2cc \"\uae30\ucd9c \uc720\ud615\"\uc784\uc744 \uaf2d \ucc38\uace0\ud574\uc8fc\uc138\uc694!**\n\n## \uacf5\uc2dd \uc608\uc2dc \ubb38\uc81c\n```\ud480\uc774 \uc601\uc0c1: ``` \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6151](https://youtu.be/E86QFVXPm5Q), \ud83d\udda5\ufe0f [\uc791\uc5c5\ud6152](https://youtu.be/_GIBVt5-khk)\n- \uc791\uc5c5\ud6151: \n P: https://www.kaggle.com/agileteam/tutorial-t1-python\n R: https://www.kaggle.com/limmyoungjin/tutorial-t1-r\n- \uc791\uc5c5\ud6152: \ubc31\ud654\uc810\uace0\uac1d\uc758 1\ub144\uac04 \ub370\uc774\ud130 (dataq \uacf5\uc2dd \uc608\uc81c)\n P: https://www.kaggle.com/agileteam/t2-exercise-tutorial-baseline\n\n## \ud83d\udce2 \uc751\uc2dc \ud658\uacbd(\uacf5\uc2dd \uc608\uc2dc \ubb38\uc81c 
environment)
https://goor.me/EvH8T

------------------------------------------

### 🎁 [Inflearn] Big Data Analysis Engineer Practical Exam: Beginner Course 🎁
- https://inf.run/XnzT
- The beginner course has moved to **Inflearn** (the Class101 contract ended) 👍
- It covers only what you need for the practical exam: Python, pandas, machine learning, mock problems, and tips 🎉
- 🆕 Type-3 task content added

------------------------------------------

### 📌 Video playlist for the practical exam:
- https://youtube.com/playlist?list=PLSlDi2AkDv82Qv7B3WiWypQSFmOCb-G_-

## 📌 Type-1 Task Expected Questions (P: Python, R)
**Check the problems and code in the Tasks tab**
- T1-1. Outlier (IQR) / #outlier #IQR (see the minimal sketch after this description)
  P: https://www.kaggle.com/agileteam/py-t1-1-iqr-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-1-iqr-expected-questions-2
- T1-2. Outlier (age) / #outlier #decimal-age
  P: https://www.kaggle.com/agileteam/py-t1-2-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-2-expected-questions-2
- T1-3. Missing data / #missing-values #drop #median #mean
  P: https://www.kaggle.com/agileteam/py-t1-3-map-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-3-expected-questions-2
- T1-4. Skewness and Kurtosis (Log Scale) / #skewness #kurtosis #log-scale
  P: https://www.kaggle.com/agileteam/py-t1-4-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-4-expected-questions-2
- T1-5. Standard deviation / #standard-deviation
  P: https://www.kaggle.com/agileteam/py-t1-5-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-5-expected-questions-2

- T1-6. Groupby Sum / #missing-values #condition
  P: https://www.kaggle.com/agileteam/py-t1-6-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-6-expected-questions-2
- T1-7. Replace / #value-replacement #condition #max
  P: https://www.kaggle.com/agileteam/py-t1-7-2-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-7-2-expected-questions-2
- T1-8. Cumulative Sum / #cumulative-sum #missing-values #interpolation
  P: https://www.kaggle.com/agileteam/py-t1-8-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-8-expected-questions-2
- T1-9. Standardization / #standardization #median
  P: https://www.kaggle.com/agileteam/py-t1-9-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-9-expected-questions-2
- T1-10. Yeo-Johnson and Box–Cox / #yeo-johnson #box-cox #missing-values #mode (unlikely to appear on the exam)
  P: https://www.kaggle.com/agileteam/py-t1-10-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-10-expected-questions-2

- T1-11. Min-max scaling / #scaling #top-bottom-values
  P: https://www.kaggle.com/agileteam/py-t1-11-min-max-5-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-11-min-max-5-expected-questions-2
- T1-12. Top10-bottom10 / #grouping #sorting #top-bottom-values
  P: https://www.kaggle.com/agileteam/py-t1-12-10-10-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-12-10-expected-questions-2
- T1-13. Correlation / #correlation
  P: https://www.kaggle.com/agileteam/py-t1-13-expected-questions
  R: https://www.kaggle.com/limmyoungjin/r-t1-13-expected-questions-2
- T1-14. Multi Index & Groupby / #multi-index #sorting #reset-index #top-values
  P: https://www.kaggle.com/agileteam/py-t1-14-2-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-14-2-expected-question-2
- T1-15. Slicing & Condition / #slicing #missing-values #median #condition
  P: https://www.kaggle.com/agileteam/py-t1-15-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-15-expected-question-2
- T1-16. Variance / #variance #difference-before-after-imputation
  P: https://www.kaggle.com/agileteam/py-t1-16-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-16-expected-question-2
- T1-17. Time-Series1 / #time-series #datetime
  P: https://www.kaggle.com/agileteam/py-t1-17-1-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-17-1-expected-question-2
- T1-18. Time-Series2 / #weekend #weekday #comparison #time-series
  P: https://www.kaggle.com/agileteam/py-t1-18-2-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-18-2-expected-question-2
- T1-19. Time-Series3 (monthly total) / #monthly #total #comparison #value-replacement
  P: https://www.kaggle.com/agileteam/py-t1-19-3-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-19-3-expected-question-2
- T1-20. Combining Data / #merge #join / matching customers to their best-fit type
  P: https://www.kaggle.com/agileteam/py-t1-20-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-20-expected-question-2

- T1-21. Binning Data / #binning #intervals
  P: https://www.kaggle.com/agileteam/py-t1-21-expected-question
  R: https://www.kaggle.com/limmyoungjin/r-t1-21-expected-question-2
- T1-22. Time-Series4 (Weekly data) / #weekly #sum
  P: https://www.kaggle.com/agileteam/t1-22-time-series4-weekly-data
  R: https://www.kaggle.com/limmyoungjin/r-t1-22-time-series4-weekly-data-2
- T1-23. Drop Duplicates / #deduplication #missing-values #fill-with-10th-value
  P: https://www.kaggle.com/agileteam/t1-23-drop-duplicates
  R: https://www.kaggle.com/limmyoungjin/r-t1-23-drop-duplicates-2
- T1-24. Time-Series5 (Lagged Feature) / #lagged-data #condition
  P: https://www.kaggle.com/agileteam/t1-24-time-series5-lagged-feature
  R: https://www.kaggle.com/limmyoungjin/r-t1-24-time-series5-2

[Advanced study] 🆕
- T1-25. String / #keyword-search #string #slicing
  P: https://www.kaggle.com/agileteam/t1-25-str-slicing
- T1-26. String / #keyword-search #string #contains
  P: https://www.kaggle.com/agileteam/t1-26-str-contains
- T1-27. String / #keyword-search #string #whitespace
  P: https://www.kaggle.com/agileteam/t1-27-str-contains-replace
- T1-28. Frequency / #frequency #index
  P: https://www.kaggle.com/agileteam/t1-28-value-counts-index
- T1-29. Time-Series (format) / #date-format
  P: https://www.kaggle.com/agileteam/t1-29-datetime-format
- T1-30. Time-Series / #percent
  P: https://www.kaggle.com/agileteam/t1-30-datetime-percent
- T1-31. Melt / #reshaping (all columns)
  P: https://www.kaggle.com/agileteam/t1-31-melt
- T1-32. Melt / #reshaping (subset of columns)
  P: https://www.kaggle.com/agileteam/t1-33-melt2

- T1-91. Sigmoid / #sigmoid
  P: https://www.kaggle.com/agileteam/t1-91-sigmoid
- T1-92. ReLU / #relu
  P: https://www.kaggle.com/agileteam/t1-92-relu


## 📌 Type-2 Task Expected Questions
**Check the problems and code in the Tasks tab**
- T2-1. Titanic (Classification)
  P: https://www.kaggle.com/agileteam/t2-1-titanic-simple-baseline
  R: https://www.kaggle.com/limmyoungjin/r-t2-1-titanic
- T2-2. Pima Indians Diabetes (Classification)
  P: https://www.kaggle.com/agileteam/t2-2-pima-indians-diabetes
  R: https://www.kaggle.com/limmyoungjin/r-t2-2-pima-indians-diabetes
- T2-3. Adult Census Income (Classification)
  P: https://www.kaggle.com/agileteam/t2-3-adult-census-income-tutorial
  R: https://www.kaggle.com/limmyoungjin/r-t2-3-adult-census-income
- T2-4. House Prices (Regression) / RMSE
  P: https://www.kaggle.com/code/agileteam/t2-4-house-prices-regression
  P: https://www.kaggle.com/code/agileteam/house-prices-starter-xgb (XGB starter)
  R: https://www.kaggle.com/limmyoungjin/r-t2-4-house-prices
- T2-5. Insurance Forecast (Regression)
  P: https://www.kaggle.com/agileteam/insurance-starter-tutorial
  R: https://www.kaggle.com/limmyoungjin/r-t2-5-insurance-prediction
- T2-6. Bike-sharing-demand (Regression) / RMSLE
  P: https://www.kaggle.com/code/agileteam/t2-6-bike-regressor
  R: https://www.kaggle.com/limmyoungjin/r-t2-6-bike-sharing-demand


## 📌 Mock Exams
### Mock Exam 1
- Type 1: [Python link](https://www.kaggle.com/agileteam/mock-exam1-type1-1-tutorial), [R link](https://www.kaggle.com/limmyoungjin/mock-exam1-type1-1)
- Type 2: [problem template](https://www.kaggle.com/agileteam/mock-exam-t2-exam-template), [true-beginner starter code](https://www.kaggle.com/agileteam/mock-exam-t2-starter-tutorial), [solution code / baseline](https://www.kaggle.com/agileteam/mock-exam-t2-baseline-tutorial)

### Mock Exam 2 (exam environment)
- Type 1: [link](https://www.kaggle.com/code/agileteam/mock-exam2-type1-1-2) (exam environment)
- Type 2 (regression): [link](https://www.kaggle.com/code/agileteam/t2-2-2-baseline-r2)

### 📌 Machine learning tutorials for beginners (selected from notebooks shared by the community 👍)
- https://www.kaggle.com/ohseokkim/t2-2-pima-indians-diabetes author: @ohseokkim 😆
- https://www.kaggle.com/wltjd54/insurance-prediction-full-ver author: @wltjd54 👏

### 📌 Must-read before the exam
- Guide to building a legitimate cheat sheet in the exam environment: https://www.kaggle.com/agileteam/tip-guide
- Practicing Type-1 tasks in the Goorm environment with external data
  - Python: https://www.kaggle.com/agileteam/tip-how-to-use-ide
  - R: https://www.kaggle.com/limmyoungjin/tip-how-to-use-ide
- Quick summary of pandas statistical functions: https://www.kaggle.com/agileteam/pandas-statistical-function
- Loading json and xml files: https://www.kaggle.com/agileteam/tip-data-load-json-and-xml


## 👋 Code 👋
- How to use: click a notebook (code), then click **'copy & edit'** at the top right; the notebook opens together with the dataset it uses!!
- Example problems and past-exam-style tutorials
- Mock problems and solutions (search for "kim tae heon")
- Type 1: search for 'T1'!
- Type 2: search for 'T2'!

## 🦑 Practical-exam study group ("Squid Game") 🦑
- Members are recruited 5 weeks before the exam
- Intensive study starts 4 weeks before the exam

## 📢 Basic learning materials
### Python, pandas, machine learning / 퇴근후딴짓
- Crash-course fundamentals for passing the exam (paid): https://inf.run/XnzT

### 📌 Python / TeddyNote
- Intro Python course (free): https://youtu.be/dpwTOQri42s
- Python e-book (free): https://wikidocs.net/book/6708

### 📌 pandas / TeddyNote
- Intro pandas course (paid): https://www.udemy.com/course/pandas-i/
- pandas e-book (free): https://wikidocs.net/book/4639

### Squid Game study-member recruitment (Discord):
- Round 6 prep: (closed)
- Round 7 prep: (recruitment not yet open)

I hope we can study and grow together!!! :) If this material helped you, please click upvote 🙏

### [Notice] -> This material will be published as a book!
- Do not copy and reuse it without consent; share it by link instead
- Unauthorized distribution of this material is prohibited
", "VersionNotes": "Data Update 2023-05-14", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
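The T1-1 entry above points to IQR outlier notebooks. As a rough, hypothetical illustration of that task pattern (not taken from the linked notebooks), the DataFrame `df` and the column name `"value"` below are placeholders:

```python
# Hypothetical sketch of a T1-1 style IQR outlier count.
# `df` and the column name "value" are placeholders for illustration.
import pandas as pd

df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 100]})

q1 = df["value"].quantile(0.25)
q3 = df["value"].quantile(0.75)
iqr = q3 - q1

# Values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are treated as outliers.
outliers = df[(df["value"] < q1 - 1.5 * iqr) | (df["value"] > q3 + 1.5 * iqr)]
print(len(outliers))
```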
[{"Id": 1633303, "CreatorUserId": 2487225, "OwnerUserId": 2487225.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5684653.0, "CurrentDatasourceVersionId": 5760226.0, "ForumId": 1654019, "Type": 2, "CreationDate": "10/07/2021 05:42:45", "LastActivityDate": "10/07/2021", "TotalViews": 367875, "TotalDownloads": 9762, "TotalVotes": 810, "TotalKernels": 1291}]
[{"Id": 2487225, "UserName": "agileteam", "DisplayName": "Kim Tae Heon", "RegisterDate": "11/13/2018", "PerformanceTier": 2}]
# ## Test whether the mean body temperature of people who took a particular drug differs significantly from that of people who did not.
# Assumptions:
# - Body-temperature data are given separately for the drug group and the non-drug group.
# - Body temperature in each group follows a normal distribution.
# ## Print the test statistic, the p-value, and the test result.
# H0: there is no significant difference in mean body temperature between the drug group and the non-drug group.
# H1: there is a significant difference in mean body temperature between the drug group and the non-drug group.

# ttest_ind vs ttest_rel
# stats.ttest_ind(A, B): calculates the t-test for the means of two independent samples of scores.
#   -> use ttest_ind (independent) when comparing two independent groups.
# stats.ttest_rel(A, B): calculates the t-test on two related samples of scores, a and b.
#   -> use ttest_rel (related) when the same group is measured twice, e.g. before/after comparisons.

from scipy import stats

group1 = [36.8, 36.7, 37.1, 36.9, 37.2, 36.8, 36.9, 37.1, 36.7, 37.1]
group2 = [36.5, 36.6, 36.3, 36.6, 36.9, 36.7, 36.7, 36.8, 36.5, 36.7]

# This is a two-sided ("equal / not equal") comparison, so the default `alternative` is kept.
result1 = stats.ttest_ind(group1, group2)
# For a two-sided test the input order of group1/group2 does not matter.
result2 = stats.ttest_ind(group2, group1)
print(result1)
print(result2)

result = result1  # result1 and result2 agree, so report result1
print("Test statistic:", result.statistic)
print("p-value:", result.pvalue)

alpha = 0.05
if result.pvalue < alpha:
    print("Hypothesis test: the p-value is below the 0.05 significance level, so reject H0 in favour of H1.")
    print("Conclusion: the mean body temperatures of the drug and non-drug groups differ significantly.")
else:
    print("Hypothesis test: the p-value is above the 0.05 significance level, so fail to reject H0.")
    print("Conclusion: the mean body temperatures of the drug and non-drug groups do not differ significantly.")
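The comments above contrast `ttest_ind` with `ttest_rel`. As a companion sketch of the paired (related-samples) case, using made-up before/after temperatures that are not part of the original problem:

```python
# Hedged sketch of the paired test mentioned in the comments above.
# The before/after values are illustrative only.
from scipy import stats

before = [36.8, 36.7, 37.1, 36.9, 37.2, 36.8, 36.9, 37.1, 36.7, 37.1]
after = [36.5, 36.6, 36.3, 36.6, 36.9, 36.7, 36.7, 36.8, 36.5, 36.7]

# ttest_rel pairs the i-th "before" value with the i-th "after" value,
# so both sequences must have the same length and ordering.
paired = stats.ttest_rel(before, after)
print("Test statistic:", paired.statistic)
print("p-value:", paired.pvalue)
```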
false
0
951
0
7,593
951
129753195
<jupyter_start><jupyter_text>Predicting Critical Heat Flux ### Context This dataset was prepared for the journal article entitled "On the prediction of critical heat flux using a physics-informed machine learning-aided framework" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. Kaggle dataset identifier: predicting-heat-flux <jupyter_script># # [Hello!] # **This notebook is a simple-baseline for Playground Series3 Episode15 competition. You can refer and copy this notebook freely, but this will need a lot of improvement(e.g., feature-engineering, tuning, and more). If you referred or copied this notebook, please vote for this. Have fun!** # # 【はじめに】 # **このノートブックは Playground Series3 Episode15 のシンプルなベースラインです。参照や複製は自由ですが、多くの改善を必要とするでしょう(特徴量エンジニアリングやチューニングなど)。もし参照や複製をされた場合は、このノートブックにvoteをお願いします。楽しんでいきましょう!** # import libraries # ライブラリのインポート import gc import math import random import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, KFold from catboost import CatBoostRegressor, Pool import lightgbm as lgb import warnings warnings.simplefilter("ignore") print("imported.") # read data # データの読込 df_data = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv") df_data["data_type"] = 0 df_data df_data.info() # **Number of records are 31644, it is light data. Target is "x_e_out [-]", and there are 9 features except for "id". We need to predict the part of NaN in "x_e_out [-]".** # **レコード件数は31,644件で、軽量データです。目的変数は"x_e_out [-]"で、"id"以外に9個の特徴量があります。目的変数のNaNの部分を予測する必要があります。** df_data.describe() # list of features (numerical/categorical) num_cols = [ "pressure [MPa]", "mass_flux [kg/m2-s]", "x_e_out [-]", "D_e [mm]", "D_h [mm]", "length [mm]", "chf_exp [MW/m2]", ] cat_cols = ["author", "geometry"] # Target: Null / non Null print("Target: x_e_out [-]") print("Null:", df_data["x_e_out [-]"].isnull().sum()) print("not Null:", df_data["x_e_out [-]"].notnull().sum()) # **10415 NaN items are included in the target. The ratio with non-NaN items is approximately 1:2.** # **10415個のNaNデータが目的変数に含まれています。non-NaNとの比はおおよそ1:2です。** # read raw-data df_raw = pd.read_csv("/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv") df_raw["data_type"] = 1 df_raw df_raw.info() df_raw.describe() # simple histgram of data/raw-data # ヒストグラム表示 df_data_hist = df_data[num_cols] df_raw_hist = df_raw[num_cols] fig, axs = plt.subplots(2, 7, figsize=(16, 8)) bins = int(math.log2(len(df_data_hist)) + 1) for row in range(2): for col in range(7): if row == 0: axs[row, col].hist(df_data_hist.iloc[:, col], bins=bins) axs[row, col].set_title(df_data_hist.columns[col]) else: axs[row, col].hist(df_raw_hist.iloc[:, col], bins=bins) axs[row, col].set_title(df_raw_hist.columns[col]) plt.show() # **The upper row is competition data, and the lower row is raw data. 
The distributions are similar, but we need more investigation and feature engineering.** # **上段がコンペデータで下段がローデータです。分布は似ていますが、さらなる調査と特徴量エンジニアリングが必要です。** # correlation between features in train-data # 特徴量間の相関関係の図示 plt.figure(figsize=(10, 10)) colormap = plt.cm.RdBu sns.heatmap( df_data.corr(), linewidths=0.1, vmax=1.0, square=True, cmap=colormap, linecolor="white", annot=True, ) # correlation between features in raw-data # 特徴量間の相関関係の図示 plt.figure(figsize=(10, 10)) colormap = plt.cm.RdBu sns.heatmap( df_raw.corr(), linewidths=0.1, vmax=1.0, square=True, cmap=colormap, linecolor="white", annot=True, ) # split data to train/test dataset train = df_data[df_data["x_e_out [-]"].notnull()] train.sort_values("id", inplace=True) train.reset_index(drop=True, inplace=True) test = df_data[df_data["x_e_out [-]"].isnull()] test.sort_values("id", inplace=True) test.reset_index(drop=True, inplace=True) print(train.shape, test.shape) train # dataset for fitting (train-data + raw-data) train_data = pd.concat([train, df_raw]) train_data.sort_values("id", inplace=True) train_data.reset_index(drop=True, inplace=True) train_data # set x/y data for fitting x_train = train_data.drop(columns=["id", "x_e_out [-]"], axis=1) y_train = train_data["x_e_out [-]"] print(x_train.shape, y_train.shape) x_train # dataset for test x_test = test.drop(columns=["id", "x_e_out [-]"], axis=1) x_test # fitting by LightGBM/CatBoost and Prediction with K-Fold cross-validation def train_gbdt(model_type, input_x, input_y, test_x, params, n_splits=5): cv = list( KFold(n_splits=n_splits, shuffle=True, random_state=22).split(input_x, input_y) ) y_test_preds = [] oof = np.zeros(len(input_x)) imp = pd.DataFrame() for nfold in np.arange(n_splits): print("-" * 30, "fold:", nfold, "-" * 30) idx_tr, idx_va = cv[nfold][0], cv[nfold][1] x_tr, y_tr = input_x.loc[idx_tr, :], input_y.loc[idx_tr] x_va, y_va = input_x.loc[idx_va, :], input_y.loc[idx_va] print("x/y train-data shapes:", x_tr.shape, y_tr.shape) print("x/y valid-data shapes:", x_va.shape, y_va.shape) # fitting if model_type == "lgb": model = lgb.LGBMRegressor(**params) model.fit( x_tr, y_tr, eval_set=[(x_tr, y_tr), (x_va, y_va)], categorical_feature=cat_cols, early_stopping_rounds=500, verbose=100, ) elif model_type == "cat": model = CatBoostRegressor(**params) train_pool = Pool(x_tr, y_tr, cat_features=cat_cols) valid_pool = Pool(x_va, y_va, cat_features=cat_cols) model.fit( train_pool, eval_set=valid_pool, early_stopping_rounds=500, verbose=100, ) # prediction y_va_pred = model.predict(x_va) oof[idx_va] = y_va_pred y_test_preds.append(model.predict(test_x)) print( "RMSE", nfold, ":", "{:.5f}".format(mean_squared_error(y_va, y_va_pred, squared=False)), ) # importance of features _imp = pd.DataFrame( { "features": input_x.columns, "importance": model.feature_importances_, "nfold": nfold, } ) imp = pd.concat([imp, _imp], axis=0, ignore_index=True) imp = imp.groupby("features")["importance"].agg(["mean", "std"]) imp.columns = ["importance", "importance_std"] imp["importance_cov"] = imp["importance_std"] / imp["importance"] imp = imp.reset_index(drop=False) display(imp.sort_values("importance", ascending=False, ignore_index=True)) return oof, y_test_preds, model, imp print("defined.") # **We defined a function for fitting by CatBoost or LightGBM. 
We can switch by "model_type".** # **CatBoostまたはLightGBMによる訓練のための関数を定義しました。"model_type"によって切替が可能です。** # fill NaN and cast "object" to "category" for train-data x_train["author"] = x_train["author"].fillna("Unknown") x_train["author"] = x_train["author"].astype("category") x_train["geometry"] = x_train["geometry"].fillna("Unknown") x_train["geometry"] = x_train["geometry"].astype("category") # fill NaN and cast "object" to "category" for test-data x_test["author"] = x_test["author"].fillna("Unknown") x_test["author"] = x_test["author"].astype("category") x_test["geometry"] = x_test["geometry"].fillna("Unknown") x_test["geometry"] = x_test["geometry"].astype("category") print( "Null:", x_train["author"].isnull().sum(), x_train["geometry"].isnull().sum(), x_test["author"].isnull().sum(), x_test["geometry"].isnull().sum(), ) display(x_train.info()) display(x_test.info()) # CatBoost model # parameter n_splits = 5 cat_params = { "loss_function": "RMSE", "n_estimators": 5000, # "learning_rate": 0.05, "random_state": 45, # "l2_leaf_reg": 3.0, # "bootstrap_type": "Bayesian", # "bagging_temperature": 1.0, # "subsample": 1.0, # "random_strength": 1.0, # "depth": 6, # "grow_policy": "Lossguide", # "grow_policy": "Depthwise", # "max_leaves": 31, # "od_type": "Iter", # "od_wait": 20, # "border_count": 254, } # fitting oof_cat, y_test_preds_cat, model_cat, imp_cat = train_gbdt( "cat", x_train, y_train, x_test, cat_params, n_splits ) print("fitting done.") # **We need more Hyper Paramete tuning.** # **さらなるハイパーパラメータチューニングが必要です。** # RMSE for CatBoost print("RMSE:", "{:.5f}".format(mean_squared_error(y_train, oof_cat, squared=False))) # visualization of predictions by test-data # テストデータによる予測結果の可視化 mean_preds_cat = np.mean(y_test_preds_cat, axis=0) sns.histplot(mean_preds_cat) # **in this notebook, only use CatBoost model to predict. 
pls use LightGBM model below as needed.** # **このノートブックでは、CatBoostモデルのみ用いて予測します。必要に応じて以下のLightGBMモデルを利用してください。** # additional # rename features for LightGBM # x_train_lgb = x_train.rename(columns={"pressure [MPa]": "pressure", "mass_flux [kg/m2-s]": "mass_flux", "D_e [mm]": "D_e", "D_h [mm]": "D_h", "length [mm]": "length", "chf_exp [MW/m2]": "chf_exp"}) # x_test_lgb = x_test.rename(columns={"pressure [MPa]": "pressure", "mass_flux [kg/m2-s]": "mass_flux", "D_e [mm]": "D_e", "D_h [mm]": "D_h", "length [mm]": "length", "chf_exp [MW/m2]": "chf_exp"}) # y_train_lgb = train_data["x_e_out [-]"] # y_train_lgb = y_train_lgb.rename("x_e_out") # x_train_lgb # additional # LightGBM model # parameter(default) # n_splits = 5 # lgb_params = { # "boosting_type": "gbdt", # "num_leaves": 31, # "max_depth": 8, # "learning_rate": 0.05, # "n_estimators": 5000, # "objective": "regression", # "metric": "rmse", # "subsample": 0.7, #1.0 # "colsample_bytree": 1.0, # "reg_lambda": 0.0, # "random_state": 45, # "importance_type": "gain", # } # fitting # oof_lgb, y_test_preds_lgb, model_lgb, imp_lgb = train_gbdt("lgb", x_train_lgb, y_train_lgb, x_test_lgb, lgb_params, n_splits) # print("fitting done.") # additional # RMSE for LightGBM # print("RMSE:", "{:.5f}".format(mean_squared_error(y_train, oof_lgb, squared=False))) # visualization of predictions by test-data # テストデータによる予測結果の可視化 # mean_preds_lgb = np.mean(y_test_preds_lgb, axis=0) # sns.histplot(mean_preds_lgb) # additional # RMSE # print("RMSE:", "{:.5f}".format(mean_squared_error(y_train, np.mean([oof_cat, oof_lgb], axis=0), squared=False))) # mean_preds = np.mean([mean_preds_cat, mean_preds_lgb], axis=0) # sns.histplot(mean_preds) # submission # 提出用データの整形・CSV出力 sample_sub = pd.read_csv("/kaggle/input/playground-series-s3e15/sample_submission.csv") # df_submit = pd.DataFrame({"id": sample_sub["id"], "x_e_out [-]": mean_preds}) df_submit = pd.DataFrame({"id": sample_sub["id"], "x_e_out [-]": mean_preds_cat}) df_submit.to_csv("submission.csv", index=None) print("completed.") df_submit
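The notebook above submits only the CatBoost predictions and leaves the LightGBM run commented out. A minimal sketch of how the two could be blended, assuming the commented-out LightGBM cells were executed so that `oof_lgb` and `mean_preds_lgb` exist alongside `oof_cat`, `mean_preds_cat`, and `y_train` from the notebook:

```python
# Hedged sketch: search a simple blend weight over the two models'
# out-of-fold predictions, then apply it to the test predictions.
import numpy as np
from sklearn.metrics import mean_squared_error


def best_blend_weight(oof_a, oof_b, y_true, steps=101):
    # Try weights w in [0, 1] for the blend w*oof_a + (1-w)*oof_b
    # and return the weight with the lowest out-of-fold RMSE.
    weights = np.linspace(0.0, 1.0, steps)
    scores = [
        mean_squared_error(y_true, w * oof_a + (1 - w) * oof_b, squared=False)
        for w in weights
    ]
    best = int(np.argmin(scores))
    return weights[best], scores[best]


# w, blended_rmse = best_blend_weight(oof_cat, oof_lgb, y_train)
# blended_test_preds = w * mean_preds_cat + (1 - w) * mean_preds_lgb
```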
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/753/129753195.ipynb
predicting-heat-flux
saurabhshahane
[{"Id": 129753195, "ScriptId": 38574603, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14597111, "CreationDate": "05/16/2023 08:23:59", "VersionNumber": 1.0, "Title": "PS-S3-E15_01-SimpleBaseline(EN/JP)_20230516", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 299.0, "LinesInsertedFromPrevious": 299.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 10}]
[{"Id": 186107405, "KernelVersionId": 129753195, "SourceDatasetVersionId": 1921393}]
[{"Id": 1921393, "DatasetId": 1145869, "DatasourceVersionId": 1959907, "CreatorUserId": 2411256, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "02/08/2021 11:44:07", "VersionNumber": 1.0, "Title": "Predicting Critical Heat Flux", "Slug": "predicting-heat-flux", "Subtitle": "prediction of critical heat flux using Machine Learning", "Description": "### Context\n\nThis dataset was prepared for the journal article entitled \"On the prediction of critical heat flux using a physics-informed machine learning-aided framework\" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. \n\n### Acknowledgements\n\nZhao, Xingang (2020), \u201cData for: On the prediction of critical heat flux using a physics-informed machine learning-aided framework\u201d, Mendeley Data, V1, doi: 10.17632/5p5h37tyv7.1", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1145869, "CreatorUserId": 2411256, "OwnerUserId": 2411256.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1921393.0, "CurrentDatasourceVersionId": 1959907.0, "ForumId": 1163376, "Type": 2, "CreationDate": "02/08/2021 11:44:07", "LastActivityDate": "02/08/2021", "TotalViews": 6889, "TotalDownloads": 589, "TotalVotes": 42, "TotalKernels": 78}]
[{"Id": 2411256, "UserName": "saurabhshahane", "DisplayName": "Saurabh Shahane", "RegisterDate": "10/26/2018", "PerformanceTier": 4}]
false
3
3,968
10
4,084
3,968
129629535
# ## Importing Packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import KFold, RandomizedSearchCV from sklearn.ensemble import ( ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, ) from sklearn.ensemble import ( HistGradientBoostingRegressor, VotingRegressor, StackingRegressor, ) from sklearn.svm import SVR, LinearSVR from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import ( LinearRegression, Ridge, Lasso, ElasticNet, SGDRegressor, ) from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.compose import ColumnTransformer from xgboost import XGBRegressor, XGBClassifier from lightgbm import LGBMRegressor from catboost import CatBoostRegressor # ## Loading Data train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") print("Train set columns:", train.columns, "\n Test set columns: ", test.columns) for df in [train, test]: df.drop("id", axis=1, inplace=True) # ## Data Descripition: # * Clonesize m2 The average blueberry clone size in the field # * Honeybee bees/m2/min Honeybee density in the field # * Bumbles bees/m2/min Bumblebee density in the field # * Andrena bees/m2/min Andrena bee density in the field # * Osmia bees/m2/min Osmia bee density in the field # * MaxOfUpperTRange ℃ The highest record of the upper band daily air temperature during the bloom season # * MinOfUpperTRange ℃ The lowest record of the upper band daily air temperature # * AverageOfUpperTRange ℃ The average of the upper band daily air temperature # * MaxOfLowerTRange ℃ The highest record of the lower band daily air temperature # * MinOfLowerTRange ℃ The lowest record of the lower band daily air temperature # * AverageOfLowerTRange ℃ The average of the lower band daily air temperature # * RainingDays Day The total number of days during the bloom season, each of which has precipitation larger than zero # * AverageRainingDays Day The average of raining days of the entire bloom season # * yield - Target variable def summary(df): print(df.shape) summ = df.describe().T summ["Nunique"] = df.nunique() summ["%unique"] = df.nunique() / len(train) * 100 summ["Null"] = df.isna().sum() summ["Dtypes"] = df.dtypes print( f"there are {len(df)} observations in this set, {len(df[df.duplicated()])} of which are duplicated" ) return summ summary(train) summary(test) # ### Insights from summary: # * There are 18 columns in our dataset # * ID column isn't a feature and should be removed # * There are 16 Features and they are all Float # * There is 1 target variable # * No Null values present # * 7 Duplicated values are present in train set, 3 Duplicated values are present in test set # ## Checking Distribution of Target Variable & 16 Features sns.kdeplot(x="yield", data=train) # #### Target Variable is Normally Distributed fig, ax = plt.subplots(4, 4, figsize=(10, 10), dpi=300) ax = ax.flatten() for i, column in enumerate(test.columns): sns.kdeplot(train[column], ax=ax[i], color="blue") sns.kdeplot(test[column], ax=ax[i], color="red") ax[i].set_title(f"{column} Distribution", size=7) ax[i].set_xlabel(None) fig.suptitle("Distribution of Feature\nper Dataset\n", fontsize=24, fontweight="bold") fig.legend(["Train", "Test"]) plt.tight_layout() # #### Train set features & Test set features have a similar 
distributions, and since that all of our features are numerical, scaling is necessary def heatmap(df, label=None): plt.figure(figsize=(14, 10), dpi=300) sns.heatmap( df.corr(method="pearson"), cmap="YlOrRd", annot=True, annot_kws={"size": 7} ) plt.title(f"Correlations between features of the {label} set") plt.show() for df, i in [(train, "Train"), (test, "Test")]: heatmap(df, i) # ### Correlation Results: # * TRange features are perfectly correlated # * RainingDays & AverageRainingDays is almost perfectly correlated # * Fruitset, fruitmass, seeds are highly correlated # ## Adversial Validation: adv_train = train.copy() adv_test = test.copy() adv_train.drop("yield", axis=1, inplace=True) adv_train["train-test"] = 0 adv_test["train-test"] = 1 adv_all = pd.concat([adv_train, adv_test]) adv_shuffle = adv_all.sample(frac=1) adv_X = adv_shuffle.drop("train-test", axis=1) adv_y = adv_shuffle["train-test"] from xgboost import cv import xgboost as xgb params = { "objective": "binary:logistic", "eval_metric": "logloss", "learning_rate": 0.05, "max_depth": 5, } XGBdata = xgb.DMatrix(data=adv_X, label=adv_y) cross_val_results = cv( dtrain=XGBdata, params=params, nfold=5, metrics="auc", num_boost_round=200, early_stopping_rounds=20, as_pandas=True, ) print((cross_val_results["test-auc-mean"]).tail(1)) # * AUC score is almost 0.5 which means that our train & test datasets are indistinguishable and we are good to go # ## Model # * This baseline model is created no tuning except for scaling data # * 5-fold Cross validation will be used X = train.copy() y = X.pop("yield") seed = 42 splits = 5 k = KFold(n_splits=splits, random_state=seed, shuffle=True) def cross_val_score(model, cv=k, label=""): X = train.copy() y = X.pop("yield") test_predictions = np.zeros((len(train))) train_predictions = np.zeros((len(train))) train_mae, val_mae = [], [] for train_idx, test_idx in k.split(X, y): X_train, X_test = X.iloc[train_idx], X.iloc[test_idx] y_train, y_test = y.iloc[train_idx], y.iloc[test_idx] model.fit(X_train, y_train) train_preds = model.predict(X_train) test_preds = model.predict(X_test) # train_predictions[train_idx] += train_preds # test_predictions[test_idx] += test_preds train_mae.append(mean_absolute_error(y_train, train_preds)) val_mae.append(mean_absolute_error(y_test, test_preds)) print( f"Val MAE: {np.mean(val_mae):.5f} ± {np.std(val_mae):.5f} | Train MAE: {np.mean(train_mae):.5f} ± {np.std(train_mae):.5f} | {label}" ) return val_mae mae_list = pd.DataFrame() models = [ ("linear", LinearRegression()), ("ridge", Ridge(random_state=seed)), ("lasso", Lasso(random_state=seed, max_iter=1000000)), ("linearsvm", LinearSVR(random_state=seed, max_iter=1000000)), ("et", ExtraTreesRegressor(random_state=seed)), ("rf", RandomForestRegressor(random_state=seed)), ("xgb", XGBRegressor(random_state=seed, eval_metric="mae")), ("lgb", LGBMRegressor(random_state=seed, objective="mae")), ("cb", CatBoostRegressor(random_state=seed, objective="MAE", verbose=0)), ("gb", GradientBoostingRegressor(random_state=seed, loss="absolute_error")), ("knn", KNeighborsRegressor()), ] for label, model in models: mae_list[label] = cross_val_score( Pipeline([("scale", StandardScaler()), (label, model)]), label=label ) # * LGBoost, CatBoost had the best base scores # * Now let's tune their parameters and use then in an ensemble # ## RandomizedSearchCV for 2 best models # Catboost # Didn't run on kaggle since it takes a very long time # model_CBR = CatBoostRegressor(objective='MAE',verbose=0) # parameters = {'depth' : [6,8,10], # 
'learning_rate' : [0.01, 0.05, 0.1], # 'iterations' : [30, 50, 100] # } # RCV = RandomizedSearchCV(model_CBR,parameters,n_jobs=-1,cv=5) # X = train.copy() # y = X.pop('yield') # X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42) # RCV.fit(X_train,y_train) # print(np.mean(mean_absolute_error(RCV.predict(X_test),y_test))) # BEST PARAMS : {'learning_rate': 0.1, 'iterations': 100, 'depth': 8} # LGBM # Didn't run on kaggle since it takes a very long time # model_lgbm = LGBMRegressor(objective='MAE',verbose=0) # parameters = { # 'task' : ['predict'], # 'boosting': ['gbdt' ], # 'objective': ['MAE'], # 'num_iterations': [ 1500, 2000,5000 ], # 'learning_rate':[ 0.05, 0.005 ], # 'num_leaves':[ 7, 15, 31 ], # 'max_depth' :[ 10,15,25], # 'min_data_in_leaf':[15,25 ], # 'feature_fraction': [ 0.6, 0.8, 0.9], # 'bagging_fraction': [ 0.6, 0.8 ], # 'bagging_freq': [ 100, 200, 400 ], # } # RCV = RandomizedSearchCV(model_lgbm,parameters,n_jobs=-1,cv=5) # X = train.copy() # y = X.pop('yield') # X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42) # RCV.fit(X_train,y_train) # print(np.mean(mean_absolute_error(RCV.predict(X_test),y_test))) # BEST PARAMS : {'task': 'predict','objective': 'MAE','num_leaves': 31,'num_iterations': 2000,'min_data_in_leaf': 15,'max_depth': 15,'learning_rate': 0.005,'feature_fraction': 0.8,'boosting': 'gbdt','bagging_freq': 100,'bagging_fraction': 0.8} # ## Voting Ensemble Model # After some tries i found out that ensemble perform best with just # LGB & CB in it ensemble_models = [ ("lgb", LGBMRegressor(random_state=seed, objective="mae")), ( "cb", CatBoostRegressor( random_state=seed, objective="MAE", verbose=0, iterations=300, depth=8, learning_rate=0.1, ), ), ] voter = Pipeline( [("scale", StandardScaler()), ("vote", VotingRegressor(ensemble_models))] ) _ = cross_val_score(voter, label="Voting Ensemble") # voter.fit(X,y) # prediction = voter.predict(test) # test_1 = test.copy() # test_1['yield'] = prediction # test_2 = test_1[['id','yield']] # test_2.set_index('id',inplace=True) # test_2.to_csv('submission.csv')
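The imports above include `StackingRegressor`, but only the voting ensemble is evaluated. As a hedged sketch of the stacking alternative, assuming `ensemble_models`, `seed`, and the custom `cross_val_score` helper defined earlier in this notebook are in scope:

```python
# Hedged sketch: stack the same LGBM/CatBoost base models behind a Ridge
# meta-learner instead of averaging them with VotingRegressor.
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

stacker = Pipeline(
    [
        ("scale", StandardScaler()),
        (
            "stack",
            StackingRegressor(
                estimators=ensemble_models,
                final_estimator=Ridge(random_state=seed),
                cv=5,
            ),
        ),
    ]
)

# Evaluate with the same custom 5-fold MAE helper used for the voting ensemble.
_ = cross_val_score(stacker, label="Stacking Ensemble")
```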
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/629/129629535.ipynb
null
null
[{"Id": 129629535, "ScriptId": 38547095, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8536881, "CreationDate": "05/15/2023 10:50:23", "VersionNumber": 1.0, "Title": "Practice Blueberry Yield", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 276.0, "LinesInsertedFromPrevious": 276.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
3,300
1
3,300
3,300
129629324
# Titanic Dataset - Data Exploration and EDA ## Introduction """ In this notebook, we will perform data exploration and exploratory data analysis (EDA) on the Titanic dataset. The Titanic dataset contains information about the passengers aboard the Titanic, including their demographics, ticket information, cabin location, and whether or not they survived the disaster. We will explore the dataset, visualize the data, analyze variable distributions, investigate relationships between variables, and extract insights to better understand the factors that might have influenced survival. """ # # **Import required libraries:** # Initially, we will import the required libraries and import the dataset. # Import required libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Load the dataset df = pd.read_csv("/kaggle/input/titanic/train.csv") # # Exploratory Data Analysis (EDA) # ## Overview of the Dataset # * Let's start by getting an overview of the dataset: # Preview the first few rows of the dataset df.head() df.shape # Above is the training dataset of the titanic survival problem. It has **891 rows** (number of passengers), and **12 columns** (data about the passenger) including the target variable “Survived”. # ## Analysis of Variables # Get an overview of the dataset df.info() # By utilizing the info() function, we obtain the data types of the dataset, the count of non-null values, and the memory usage. # Summary statistics for numerical variables df.describe() # Explore the relationship between variables sns.pairplot(df, hue="Survived") plt.show() # Explore the distribution of continuous variables fig, ax = plt.subplots(1, 2, figsize=(12, 5)) sns.histplot(x="Age", data=df, ax=ax[0]) sns.histplot(x="Fare", data=df, ax=ax[1]) plt.show() # ## Check for missing values # # Check for missing values df.isnull().sum() # We already know that there are 177 missing values in the Age column. From the above results, we observe that there are 687 missing values in the ‘Cabin’ column and 2 missing values in the ‘Embarked’ column. It is important to address these missing values before proceeding with further analysis and modeling. # #### Let's now visualize the null value heatmap¶ sns.heatmap(df.isnull(), cmap="viridis", cbar=False) # Since there are only 2 missing values in the 'Embarked' column, it is not discernible in the heatmap # ## Data Preprocessing # * As the 'Cabin' column contains a high number of NaN values, it needs to be processed before performing any analysis. The 'Cabin' column indicates the cabin number of the passenger or NaN for those who didn't have one. In order to handle this column, a new column 'HasCabin' will be created which contains a value of 1 if the passenger has a cabin and 0 for NaN values. def create_feat_has_cabin(df, colname): # if NA => 0 else 1 def _is_nan(x): if isinstance(x, type(np.nan)): return 0 return 1 return df[colname].apply(_is_nan) df["HasCabin"] = create_feat_has_cabin(df, "Cabin") # * We will now replace the missing values in the 'Embarked' column with 'S' to indicate that the passengers embarked from Southampton. def fill_na_embarked(df, colname): return df[colname].fillna("S") df["Embarked"] = fill_na_embarked(df, "Embarked") # * **Likewise**, the **'Age'** column contains a substantial number of missing values. 
# * **Therefore**, we will impute these missing values by generating random values with the mean as the central tendency and standard deviation (SD) as the measure of spread. # **First, let's compute the mean and SD of the 'Age' column** mean = df["Age"].mean() sd = df["Age"].std() print(mean, sd) # - The average value of the dataset is **29.69** with a standard deviation of **13.53** # - Therefore, we will fill the missing values by selecting a random number between **16 and 43**. def fill_na_age(df, colname): mean = df["Age"].mean() sd = df["Age"].std() def fill_empty(x): if np.isnan(x): return np.random.randint(mean - sd, mean + sd, ()) return x return df[colname].apply(fill_empty).astype(int) df["Age"] = fill_na_age(df, "Age") # ## Fearure Engineering # **Feature engineering** is an important step in the data preprocessing stage. After handling missing values, we can think of new features that may improve the performance of our machine learning model. # One such feature we can create is the 'FamilySize' column by combining the 'SibSp' (Sibling and Spouse) and 'Parch' (Parent and Children) columns. This can provide insights into the size of the family and potentially affect the survival rate. def create_feat_familly_size(df): return df["SibSp"] + df["Parch"] + 1 df["FamilySize"] = create_feat_familly_size(df) # **Alright, completed!** # * Regarding the individuals who were traveling alone, we have added a new column called 'IsAlone' with binary values of 0 and 1. def create_feat_isalone(df, colname): def _is_alone(x): if x == 1: return 1 return 0 return df[colname].apply(_is_alone) df["IsAlone"] = create_feat_isalone(df, "FamilySize") # As previously observed, the Fare column contains zeroes for some passengers and extremely high values for others. Thus, we will categorize the fares into four groups and create a new column called 'CategoricalFare' to store this information. def create_feat_categoricalFare(df, colname): return pd.qcut(df[colname], 4, labels=[0, 1, 2, 3]).astype(int) df["CategoricalFare"] = create_feat_categoricalFare(df, "Fare") # - We have already imputed the missing values in the Age column. # **Now** let's categorize the Age column into 5 categories and create a new column named 'CategoricalAge' to store this categorical data def create_feat_categoricalAge(df, colname): return pd.qcut(df[colname], 5, labels=[0, 1, 2, 3, 4]).astype(int) df["CategoricalAge"] = create_feat_categoricalAge(df, "Age") # **Done!** # Now let's take a look at the Name column. There are various titles present in the dataset such as 'Mr', 'Mrs', 'Miss', 'Master', 'Don', 'Rev', 'Dr', 'Mme', 'Ms', 'Major', 'Lady', 'Sir', 'Mlle', 'Col', 'Capt', 'Countess', and 'Jonkheer'. We can extract the title from each name and categorize them into four categories: Mr, Miss, Mrs, and Rare. Finally, we can store these titles in a new column called 'Title'. import re def create_feat_title(df, colname): def find_title(x): title_search = re.search(" ([A-Za-z]+)\.", x) if title_search: title = title_search.group(1) if title in ["Mlle", "Ms"]: return "Miss" elif title in ["Mme", "Mrs"]: return "Mrs" elif title == "Mr": return "Mr" else: return "Rare" return "" return_title = df[colname].apply(find_title) dict_title = {"Miss": 1, "Mrs": 2, "Mr": 3, "Rare": 4} return return_title.replace(dict_title) df["Title"] = create_feat_title(df, "Name") # * Now we'll convert the values in the 'Sex' column from 'Male' and 'Female' to 1 and 0, respectively, and store them in a new column called 'SexNumerical'. 
We do this because most machine learning algorithms operate on numerical data, not categorical data like 'Male/Female' or 'Yes/No'. By representing 'Male' and 'Female' as 1 and 0, the algorithm can better understand the difference between the two categories. # * We'll also convert the values in the 'Embarked' column to numerical, using the same rationale. By converting 'Embarked' to a numerical representation, the machine learning algorithm can better understand the relationship between the different values in this column def create_feat_sex(df, colname): def sex(x): if x == "male": return 1 return 0 return df[colname].apply(sex) df["SexNumerical"] = create_feat_sex(df, "Sex") df["Embarked"] = df.Embarked.replace({"S": 0, "C": 1, "Q": 2}) # **Great**, now that we've completed the data cleaning and feature engineering steps, let's verify that there are no remaining null values in the data frame. df.isna().sum() # * We can ignore the missing values in the 'Cabin' column since we have already created a new column 'HasCabin'. Therefore, it is time to drop the unnecessary columns. drop_list = ["PassengerId", "Cabin", "Ticket", "SibSp", "Name"] titanic = df.drop(drop_list, axis=1) # ## Correlation Analysis # * We have completed the Data Cleaning and Pre-processing steps. # * Before proceeding to visualize the data, let's examine the correlation between the variables. corrmat = titanic.corr() corrmat # * The correlation matrix indicates the degree of correlation between different variables in the Titanic Survivor Dataset. The first row of the matrix displays the correlation of each variable with the target variable 'Survived'. # * In order to build an accurate predictive model, we need to identify the variables that have a significant impact on the target variable, either positively or negatively. It's important to consider both high and low correlation values. # * We can visualize the correlations using a heatmap created with the Seaborn library, which provides a clear and aesthetically pleasing representation of the data. colormap = plt.cm.Blues plt.figure(figsize=(14, 10)) sns.heatmap(titanic.corr(), cmap=colormap, annot=True, linewidths=0.2) # * The initial row displays the correlation coefficients between each variable and the target variable. 'HasCabin' and 'CategoricalFare' exhibit high positive correlations with the target variable, while 'NumericalSex' has a negative correlation with the target variable. # ## Univariate Analysis of Target Variable # * Analyzing the target variable is a crucial step in data analysis as it provides insights into the distribution and nature of the variable. In this project, we will perform univariate analysis of our target variable, 'Survived'. # titanic['Survived'].value_counts() # * From the above result, **342 out of 891** passengers in the training data are survived. Let's plot it using count plot. sns.countplot(data=titanic, x="Survived") plt.title("Titanic Survived") plt.show() # * Based on the plot shown above, it appears that there were more passengers who did not survive compared to those who did. # * To further understand the distribution of survival among the passengers, we can create a pie chart to visualize the percentage of passengers who survived. explode = [0, 0.05] titanic["Survived"].value_counts().plot.pie(autopct="%1.2f%%", explode=explode) # * Based on the chart shown above, it can be observed that only **38%** of the passengers survived according to the training data. # * This indicates that there is an imbalance between the classes. 
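# The 62/38 split noted above can be quantified directly. The sketch below is an illustrative addition (not part of the original notebook); it assumes the `titanic` DataFrame defined in the cells above, and shows inverse-frequency class weights as one common way to account for the skew later on.
survival_counts = titanic["Survived"].value_counts()
print(titanic["Survived"].value_counts(normalize=True).round(3))  # roughly 0.62 did not survive vs 0.38 survived
# Inverse-frequency weights, equivalent to scikit-learn's class_weight="balanced"
class_weights = len(titanic) / (survival_counts.size * survival_counts)
print(class_weights.round(3))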
# ## Univariate Analysis # * Let's explore the **'Pclass'** column as it appears to have a strong correlation with the target variable. titanic["Pclass"].value_counts() titanic.groupby(["Pclass", "Survived"])["Survived"].count() # * The aforementioned output depicts the distribution of passengers categorized by Pclass and Survived. However, the survival rate for each group remains undisclosed. To obtain a more comprehensive understanding of the data, let us create a visualization that incorporates both Pclass and Survived. sns.catplot(x="Pclass", y="Survived", data=titanic, kind="point") # * The plot shown above is a Point plot, which displays point estimates and confidence intervals. # * The point estimates represent the central tendency of a variable, while the confidence intervals represent the uncertainty around this estimate. # * Based on the plot, it is **evident that first-class passengers had a higher survival rate compared to those in other classes.** # * Moving on to another example of bivariate analysis, we can compare the variables of Sex and Fare. sns.catplot(x="Sex", y="Fare", data=titanic, kind="boxen") # * The improved box plot displayed above suggests that the fare paid by female passengers is, on average, higher than that paid by male passengers. This difference in fares may be attributed to the supplementary amenities provided to female passengers. # ## Multivariate Analysis # * Multivariate Analysis is a powerful tool for exploring the relationships between multiple variables, providing a deeper understanding than Bivariate Analysis. In contrast to Bivariate Analysis, which assumes that the relationship between a variable X and the target variable Y is independent of all other variables, Multivariate Analysis acknowledges that other variables (i.e., a third variable Z) may influence this relationship. Ignoring these variables can be dangerous, as illustrated by the naval code of conduct “Women and children first” which prioritizes saving the lives of women and children in life-threatening situations. While we know that “Survival” is highly correlated with “Sex,” the relationship is further influenced by a third variable, “Age” (specifically, whether the person is a child). # * To model the relationship between multiple variables, we will begin by analyzing data with three variables: “Sex,” “Age,” and “Survival.” We can then expand this analysis to include a fourth variable, such as “Class,” and explore the relationships between all four variables. In this example, we will focus on comparing the variables of “Sex” and “Age.” sns.catplot(x="Sex", y="Age", data=titanic) # * From the previous visualization, we observe that a few elderly men were among the passengers on board the Titanic. # **However**, we cannot draw any significant conclusions by merely examining the age and gender of the passengers. # **Therefore**, let's incorporate the third parameter, "Pclass," to gain a more in-depth understanding of the data. sns.catplot(x="Sex", y="Age", data=titanic, kind="box", hue="Pclass") # * Based on the violin plot, we can see that **most of the older passengers, aged 50 to 80**, were traveling in first class, while the **majority of younger passengers, aged 25 to 35**, were in second and third class. # * It's possible that older passengers traveled in first class because they were wealthy and could afford the higher fares, while younger passengers may have been more budget-conscious. 
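# The point plot above shows the per-class survival estimates graphically; computing them directly makes the numbers explicit. Illustrative sketch, not from the original notebook, assuming the `titanic` DataFrame from the preceding cells.
pclass_survival = (
    titanic.groupby("Pclass")["Survived"]
    .agg(["mean", "count"])
    .rename(columns={"mean": "survival_rate", "count": "passengers"})
)
print(pclass_survival)  # first class should show the highest survival_rate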
sns.catplot(x="Pclass", y="Age", data=titanic, kind="violin", hue="Sex") # **Now** let's explore how to **compare two continuous variables**, specifically **'Age' and 'Fare'.** sns.jointplot(x="Age", y="Fare", data=titanic, kind="hex") # The typical passengers were aged between 20 and 40, with an average fare ranging from 20𝑡𝑜 # 50. # - The previous plot did not provide much insight, but we can gain a better understanding of the data by examining the relationship between 'Sex', 'Pclass', 'Age', and 'Fare'. sns.relplot(x="Age", y="Fare", data=titanic, row="Sex", col="Pclass")
<jupyter_start><jupyter_text>COVID-19 in India ### Context Coronaviruses are a large family of viruses which may cause illness in animals or humans. In humans, several coronaviruses are known to cause respiratory infections ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS) and Severe Acute Respiratory Syndrome (SARS). The most recently discovered coronavirus causes coronavirus disease COVID-19 - World Health Organization The number of new cases are increasing day by day around the world. This dataset has information from the states and union territories of India at daily level. State level data comes from [Ministry of Health & Family Welfare](https://www.mohfw.gov.in/) Testing data and vaccination data comes from [covid19india](https://www.covid19india.org/). Huge thanks to them for their efforts! Update on April 20, 2021: Thanks to the [Team at ISIBang](https://www.isibang.ac.in/~athreya/incovid19/), I was able to get the historical data for the periods that I missed to collect and updated the csv file. ### Content COVID-19 cases at daily level is present in `covid_19_india.csv` file Statewise testing details in `StatewiseTestingDetails.csv` file Travel history dataset by @dheerajmpai - https://www.kaggle.com/dheerajmpai/covidindiatravelhistory Kaggle dataset identifier: covid19-in-india <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns covid_19 = pd.read_csv("covid_19_india.csv") covid_19.head() covid_19.info() covid_19.shape covid_19.drop( ["Sno", "ConfirmedIndianNational", "ConfirmedForeignNational"], axis=1, inplace=True ) covid_19.head() covid_19["State/UnionTerritory"].unique(), covid_19["State/UnionTerritory"].nunique() "Correcting spelling mistakes or impurities" state_correction_dict = { "Bihar****": "Bihar", "Dadra and Nagar Haveli": "Dadra and Nagar Haveli and Daman and Diu", "Madhya Pradesh***": "Madhya Pradesh", "Maharashtra***": "Maharashtra", "Karanataka": "Karnataka", } def state_correction(state): try: return state_correction_dict[state] except: return state covid_19["State/UnionTerritory"] = covid_19["State/UnionTerritory"].apply( state_correction ) covid_19["State/UnionTerritory"].nunique() from datetime import datetime covid_19["Date"] = pd.to_datetime(covid_19["Date"], format="%Y-%m-%d") covid_19.head() # Active_case covid_19["Active_case"] = covid_19["Confirmed"] - ( covid_19["Cured"] + covid_19["Deaths"] ) covid_19.head() "using pivot function to find cured , deaths , confirmed cases State wise" statewise = pd.pivot_table( covid_19, values=["Cured", "Deaths", "Confirmed"], index="State/UnionTerritory", aggfunc="max", margins=True, ) statewise # top ten active by statiwise df_top_10 = covid_19.nlargest(10, ["Active_case"]) df_top_10 df_top_10 = ( covid_19.groupby(["State/UnionTerritory"])["Active_case"] .max() .sort_values(ascending=False) .reset_index() ) df_top = df_top_10.nlargest(10, ["Active_case"]) df_top df_top_death = covid_19.nlargest(10, ["Deaths"]) df_top_10 = ( covid_19.groupby(["State/UnionTerritory"])["Deaths"] .max() .sort_values(ascending=False) .reset_index() ) df_top_death = df_top_10.nlargest(10, ["Deaths"]) df_top_death # Finding recovery rate and deathrate statewise["Recovary_rate"] = statewise["Cured"] * 100 / statewise["Confirmed"] statewise["Deathrate"] = statewise["Deaths"] * 100 / statewise["Confirmed"] statewise = statewise.sort_values(by="Confirmed", ascending=False) statewise.style.background_gradient(cmap="cubehelix") # top 
active case top10_active_case = ( covid_19.groupby(by="State/UnionTerritory") .max()[["Active_case", "Date"]] .sort_values(by=["Active_case"], ascending=False) .reset_index() ) fig = plt.figure(figsize=(10, 6)) plt.title("top 10 state with most active case ") ax = sns.barplot( data=top10_active_case.iloc[:10], y="Active_case", x="State/UnionTerritory", linewidth=2, edgecolor="red", ) # top 10 state active case top_10_active_case = ( covid_19.groupby(by="State/UnionTerritory") .max()[["Active_case", "Date"]] .sort_values(by=["Active_case"], ascending=False) .reset_index() ) fig = plt.figure(figsize=(16, 9)) plt.title("top 10 state with most active case ") ax = sns.barplot( data=top_10_active_case.iloc[:10], y="Active_case", x="State/UnionTerritory", linewidth=2, edgecolor="red", ) plt.xlabel("State") plt.ylabel("Total Active_case") plt.show() top_10_Deaths_case = ( covid_19.groupby(by="State/UnionTerritory") .max()[["Deaths", "Date"]] .sort_values(by=["Deaths"], ascending=False) .reset_index() ) plt.figure(figsize=(16, 9)) ax = sns.barplot( data=top_10_Deaths_case.iloc[:10], y="Deaths", x="State/UnionTerritory", linewidth=2, edgecolor="red", ) plt.title("Top 10 State where most death occur") plt.xlabel("STATE") plt.ylabel("Total Death") plt.show() plt.figure(figsize=(20, 10)) # ax=sns.lineplot(data=covid_19[covid_19['State/UnionTerritory'].isin(['Maharashtra','Kerala','Karnataka','Tamil Nadu','Delhi'])], X='Date',y='Active_case',hue='State/UnionTerritory') import seaborn as sns ax = sns.lineplot( data=covid_19[ covid_19["State/UnionTerritory"].isin( ["Maharashtra", "Kerala", "Karnataka", "Tamil Nadu", "Delhi"] ) ], x="Date", y="Active_case", hue="State/UnionTerritory", ) ax.set_title("top 5 affected state") vaccine = pd.read_csv("covid_vaccine_statewise.csv") vaccine.head() vaccine.shape vaccine.isnull().sum() vaccine.rename(columns={"Updated On": "Vaccine_Date"}, inplace=True) # student_df_1.rename(columns={"id": "ID"}, inplace=True) vaccine.head() vaccine.info() vaccine.drop( [ "Sputnik V (Doses Administered)", "AEFI", "18-44 Years (Doses Administered)", "45-60 Years (Doses Administered)", "60+ Years (Doses Administered)", ], axis=1, inplace=True, ) vaccine.head() # male vs female male = vaccine["Male(Individuals Vaccinated)"].sum() female = vaccine["Female(Individuals Vaccinated)"].sum() male, female import plotly.express as px from plotly.subplots import make_subplots px.pie(names=["male", "female"], values=[male, female], title="Male and Female v") vaccine_df = vaccine[vaccine["State"] != "India"] vaccine_df.rename(columns={"Total Individuals Vaccinated": "Total"}, inplace=True) # max_vac=vaccine_df.groupby(by='State')['Total'].sum().sort_values(by=['Total'],ascending=False max_va = ( vaccine_df.groupby(by="State") .sum()[["Total"]] .sort_values(by=["Total"], ascending=False) .iloc[:10] .reset_index() ) max_va plt.figure(figsize=(16, 9)) x = sns.barplot( data=max_va, y=max_va.Total, x=max_va.State, linewidth=2, edgecolor="red" )
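# The notebook builds the same groupby / sort / top-10 pattern several times (active cases, deaths); a small helper keeps that logic in one place. Illustrative sketch, not part of the original notebook, assuming the `covid_19` DataFrame defined above.
def top_states(frame, metric, n=10):
    """Return the n states with the largest maximum value of `metric`."""
    return (
        frame.groupby("State/UnionTerritory")[metric]
        .max()
        .sort_values(ascending=False)
        .head(n)
        .reset_index()
    )

print(top_states(covid_19, "Active_case"))
print(top_states(covid_19, "Deaths"))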
<jupyter_start><jupyter_text>Shark Tank India Season 1 Complete - 36 episodes ### Context I love Shark Tank and when Shark Tank India aired, I was pretty excited to see how things would pan out here. While the sharks were okay to see, I liked the fact that I could find about so many new startups and that was very well done. I think this dataset will help people gain a lot of insights about average deals made, how many of them took on loans, how many investors stepped in in which industry etc. ### Content The data describes all the deals made over all 36 episodes of Season 1 of Shark Tank India that aired between Dec 2021 and Feb 2022. Kaggle dataset identifier: shark-tank-india-season-1-complete <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/shark-tank-india-season-1-complete/Shark_Tank_India_S1.csv" ) df.head() df.shape df.columns df.info() df["episode_number"].value_counts().sort_values() df.describe() a = df["anupam_invested"].sum() b = df["peyush_invested"].sum() c = df["aman_invested"].sum() d = df["ghazal_invested"].sum() e = df["ashneer_invested"].sum() def total_invested_by_sharks(str): return df[str].sum() total_invested_by_sharks("ashneer_invested") f = total_invested_by_sharks("namita_invested") g = total_invested_by_sharks("vineeta_invested")
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os # for dirname, _, filenames in os.walk('/kaggle/input'): # for filename in filenames: # print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import os import gc import cv2 import numpy as np import pandas as pd from tqdm import tqdm from shutil import copyfile import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split # customize iPython writefile so we can write variables from IPython.core.magic import register_line_cell_magic import torch import ultralytics from ultralytics import YOLO ultralytics.checks() model = YOLO("yolov8m.pt") TRAIN_PATH = "/kaggle/input/global-wheat-detection/train/" IMG_SIZE = 256 BATCH_SIZE = 16 EPOCHS = 10 df = pd.read_csv("/kaggle/input/global-wheat-detection/train.csv") df["id"] = df.apply(lambda row: row.image_id.split("_")[0], axis=1) df["path"] = df.apply(lambda row: TRAIN_PATH + row.id + ".jpg", axis=1) df.head() train_df, valid_df = train_test_split( df, test_size=0.2, random_state=42, stratify=df.source.values ) train_df.loc[:, "split"] = "train" valid_df.loc[:, "split"] = "valid" df = pd.concat([train_df, valid_df]).reset_index(drop=True) print( f"Size of dataset: {len(df)}, training images: {len(train_df)}. validation images: {len(valid_df)}" ) os.makedirs("tmp/wheat/images/train", exist_ok=True) os.makedirs("tmp/wheat/images/valid", exist_ok=True) os.makedirs("tmp/wheat/labels/train", exist_ok=True) os.makedirs("tmp/wheat/labels/valid", exist_ok=True) df.head() # Move the images to relevant split folder. for i in tqdm(range(len(df))): row = df.loc[i] if row.split == "train": copyfile(row.path, f"tmp/wheat/images/train/{row.id}.jpg") else: copyfile(row.path, f"tmp/wheat/images/valid/{row.id}.jpg") # Create .yaml file import yaml data_yaml = dict( train="../wheat/images/train", val="../wheat/images/valid", nc=2, names=["none", "opacity"], ) # Note that I am creating the file in the yolov5/data/ directory. with open("tmp/yolov5/data/data.yaml", "w") as outfile: yaml.dump(data_yaml, outfile, default_flow_style=True)
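# The cells above create tmp/wheat/labels/train and tmp/wheat/labels/valid but never write label files, while YOLO expects one .txt per image with normalized `class x_center y_center width height` rows. The sketch below is an illustrative addition, not part of the original notebook: it assumes the competition's `bbox` column holds "[xmin, ymin, width, height]" strings and that the images are 1024x1024; adjust both if that does not match your data. With a single wheat class, the data.yaml above would also need nc=1 and a matching names list.
import ast

IMG_W = IMG_H = 1024  # assumed image size

def bbox_to_yolo(bbox_str, img_w=IMG_W, img_h=IMG_H, cls=0):
    # bbox_str is assumed to be "[xmin, ymin, width, height]"
    xmin, ymin, w, h = ast.literal_eval(bbox_str)
    x_center = (xmin + w / 2) / img_w
    y_center = (ymin + h / 2) / img_h
    return f"{cls} {x_center:.6f} {y_center:.6f} {w / img_w:.6f} {h / img_h:.6f}"

for (img_id, split), group in df.groupby(["id", "split"]):
    folder = "train" if split == "train" else "valid"
    with open(f"tmp/wheat/labels/{folder}/{img_id}.txt", "w") as f:
        f.write("\n".join(bbox_to_yolo(b) for b in group["bbox"]))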
<jupyter_start><jupyter_text>Exam-Three-3 Kaggle dataset identifier: input <jupyter_script>from pyhanlp import * from jpype import JString import jieba.analyse.textrank # ## 分词 my_data = open(r"/kaggle/input/input/3.txt", "r", encoding="utf8").read() words2 = jieba.cut(my_data, cut_all=True) file = open(r"./result.txt", "w", encoding="utf8") # 注意a或者w file.write(" ".join(words2)) NatureDictionaryMaker = SafeJClass( "com.hankcs.hanlp.corpus.dictionary.NatureDictionaryMaker" ) CorpusLoader = SafeJClass("com.hankcs.hanlp.corpus.document.CorpusLoader") def train_bigram(corpus_path, model_path): sents = CorpusLoader.convert2SentenceList(corpus_path) for sent in sents: # 为兼容hanlp字典格式,为每个单词添加占位符 for word in sent: word.setLabel("n") # 创建maker对象 maker = NatureDictionaryMaker() # 进行一元、二元统计 maker.compute(sents) # 保存文件,会得到三个文件 maker.saveTxtTo(model_path) # unigram # bigram train_bigram("./result.txt", "./part1") corpus_path = "/kaggle/working/result.txt" # 语料库所在路径 sentences = CorpusLoader.convert2SentenceList(corpus_path) # 返回List<List<IWord>>类型 for sent in sentences: print(sent) NatureDictionaryMaker = SafeJClass( "com.hankcs.hanlp.corpus.dictionary.NatureDictionaryMaker" ) # 词典模型Java模块(统计一元、二元语法) CorpusLoader = SafeJClass( "com.hankcs.hanlp.corpus.document.CorpusLoader" ) # 语料库加载Java模块 model_path = "/kaggle/working/my_cws_model" # 语言模型存储路径 for sent in sentences: for word in sent: if word.label is None: word.setLabel("n") # 赋予每个单词一个虚拟的n词性用作占位 maker = NatureDictionaryMaker() # 构造NatureDictionaryMaker对象 maker.compute(sentences) # 统计句子中的一元语法、二元语法 maker.saveTxtTo(model_path) # 将统计结果存储到路径 HanLP.Config.CoreDictionaryPath = model_path + ".txt" # 一元语法模型路径 HanLP.Config.BiGramDictionaryPath = model_path + ".ngram.txt" # 二元语法模型路径 CoreDictionary = LazyLoadingJClass( "com.hankcs.hanlp.dictionary.CoreDictionary" ) # 加载一元语法模型Java模块 print(CoreDictionary.getTermFrequency("核酸")) # 测试"核酸"的一元语法频次 CoreBiGramTableDictionary = SafeJClass( "com.hankcs.hanlp.dictionary.CoreBiGramTableDictionary" ) print(CoreBiGramTableDictionary.getBiFrequency("核酸", "检测")) ## 加载 JAVA 类 CorpusLoader = SafeJClass("com.hankcs.hanlp.corpus.document.CorpusLoader") NatureDictionaryMaker = SafeJClass( "com.hankcs.hanlp.corpus.dictionary.NatureDictionaryMaker" ) CoreDictionary = LazyLoadingJClass("com.hankcs.hanlp.dictionary.CoreDictionary") WordNet = JClass("com.hankcs.hanlp.seg.common.WordNet") Vertex = JClass("com.hankcs.hanlp.seg.common.Vertex") # # **实验二** from gensim import * from gensim.test.utils import common_texts, get_tmpfile from gensim.models import word2vec import jieba import jieba.analyse # 不分开一些词 # jieba.suggest_freq('闪电', True) # jieba.suggest_freq('直升机', True) # jieba.suggest_freq('球形', True) # jieba.suggest_freq('爆炸', True) # 分词 with open("/kaggle/input/input/lightning.txt", encoding="GBK") as f: document = f.read() # document_decode = document.decode('GBK') document_cut = jieba.lcut(document) result = " ".join(document_cut) with open("/kaggle/working/two_out.txt", "w", encoding="utf-8") as f2: f2.write(result) # 加载语料 sentences = word2vec.LineSentence("/kaggle/working/two_out.txt") # 训练语料 path = get_tmpfile("word2vec.model") # 创建临时文件 model = word2vec.Word2Vec(sentences, hs=1, min_count=1, window=3, size=50) model.save("word2vec.model") # 模型训练完成 # 闪电 和 直升机 的词向量 print("闪电的词向量是: \n", model.wv.get_vector("闪电")) print("直升机的词向量是: \n", model.wv.get_vector("直升机")) # 球形 和 闪电 的相似度 print("球形和闪电的形似度是:\n", model.wv.similarity("球形", "闪电")) # 与 爆炸 最相近的十个词 print("与爆炸最相近的十个词是:") req_count = 10 for key in model.wv.similar_by_word("爆炸", topn=100): 
if len(key[0]) > 1: req_count -= 1 print(key[0], key[1]) if req_count == 0: break
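# The two frequencies printed earlier are exactly the ingredients of a bigram language model. As an illustrative sketch (not part of the original notebook), using the CoreDictionary and CoreBiGramTableDictionary objects loaded above, the maximum-likelihood estimate of P("检测" | "核酸") is the bigram count divided by the unigram count.
unigram_count = CoreDictionary.getTermFrequency("核酸")
bigram_count = CoreBiGramTableDictionary.getBiFrequency("核酸", "检测")
p_mle = bigram_count / unigram_count if unigram_count else 0.0
print(f'P("检测" | "核酸") = {bigram_count}/{unigram_count} = {p_mle:.4f}')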
false
0
1,463
0
1,483
1,463
129654342
# # PPL
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel


def calculate_ppl(text, model_name="gpt2"):
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = GPT2LMHeadModel.from_pretrained(model_name)
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    tokenized_text = tokenizer.encode(text, return_tensors="pt")
    if torch.cuda.is_available():
        tokenized_text = tokenized_text.cuda()
    with torch.no_grad():
        outputs = model(tokenized_text, labels=tokenized_text)
        loss = outputs.loss
        ppl = torch.exp(loss)
    return ppl.item()


if __name__ == "__main__":
    text = (
        "The blue sky reflected in the water of the forest, creating a surreal sight."
    )
    ppl = calculate_ppl(text)
    print(f"Perplexity: {ppl}")


import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def calculate_ppl(text, model, tokenizer):
    encodings = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encodings, labels=encodings["input_ids"])
        loss = outputs.loss.item()
    ppl = torch.exp(torch.tensor(loss)).item()
    return ppl


def average_ppl_of_txt(file_path, model, tokenizer):
    total_ppl = 0
    num_lines = 0
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            ppl = calculate_ppl(line, model, tokenizer)
            total_ppl += ppl
            num_lines += 1
    avg_ppl = total_ppl / num_lines
    return avg_ppl


def main():
    model_name = "gpt2"  # other pretrained models also work, e.g. "gpt2-medium", "gpt2-large", "gpt2-xl"
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = GPT2LMHeadModel.from_pretrained(model_name)
    file_path = "/kaggle/input/NLP-Evaluation-Sample/source.txt"  # replace with the path of the txt file to evaluate
    avg_ppl = average_ppl_of_txt(file_path, model, tokenizer)
    print(f"Average Perplexity (PPL) of the file '{file_path}': {avg_ppl}")


if __name__ == "__main__":
    main()

# # BLEU
import sys
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu
from typing import List, Tuple


def read_txt_file(file_path: str) -> List[str]:
    with open(file_path, "r", encoding="utf-8") as f:
        content = f.readlines()
    return [line.strip() for line in content]


def load_data(
    source: str, reference: str
) -> Tuple[List[List[str]], List[List[List[str]]]]:
    source_data = read_txt_file(source)
    reference_data = read_txt_file(reference)
    # corpus_bleu expects lists of tokens, so split each line into tokens and wrap
    # every reference in an extra list (one reference per hypothesis)
    hypotheses = [line.split() for line in source_data]
    references = [[ref.split()] for ref in reference_data]
    return hypotheses, references


def calculate_bleu_score(source: str, reference: str) -> float:
    source_data, reference_data = load_data(source, reference)
    bleu_score = corpus_bleu(reference_data, source_data)
    return bleu_score


# if __name__ == "__main__":
#     if len(sys.argv) != 3:
#         print("Usage: python bleu_eval.py source.txt reference.txt")
#         sys.exit(1)
#     source = sys.argv[1]
#     reference = sys.argv[2]
source = "/kaggle/input/NLP-Evaluation-Sample/source.txt"
reference = "/kaggle/input/NLP-Evaluation-Sample/reference.txt"
score = calculate_bleu_score(source, reference)
print(f"BLEU Score: {score:.2f}")

# # Self-BLEU
import nltk
import random


def read_file(filename):
    """Read a txt file and return a list of strings."""
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
    return [line.strip() for line in lines]


def compute_self_bleu(corpus, ngram=4, samples=1000):
    """Compute the Self-BLEU score (`ngram` is used here as the number of sampled references)."""
    bleus = []
    for i in range(samples):
        # randomly pick one sentence from the corpus as the hypothesis
        ref = random.choice(corpus)
        # draw `ngram` random sentences as references (renamed from `samples`
        # to avoid shadowing the function argument)
        sampled_refs = [random.choice(corpus) for _ in range(ngram)]
        # compute sentence-level BLEU on token lists
        bleu = nltk.translate.bleu_score.sentence_bleu(
            [s.split() for s in sampled_refs], ref.split()
        )
        bleus.append(bleu)
    # return the average Self-BLEU
    return sum(bleus) / len(bleus)


# example usage
corpus = read_file("/kaggle/input/NLP-Evaluation-Sample/source.txt")
self_bleu = compute_self_bleu(corpus, ngram=4, samples=1000)
print(f"Self-Bleu: {self_bleu:.4f}")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/654/129654342.ipynb
null
null
[{"Id": 129654342, "ScriptId": 38274243, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4963800, "CreationDate": "05/15/2023 14:03:17", "VersionNumber": 1.0, "Title": "Metrics", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 141.0, "LinesInsertedFromPrevious": 141.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,354
0
1,354
1,354
129654468
<jupyter_start><jupyter_text>Wild blueberry Yield Prediction Dataset ### Context Blueberries are perennial flowering plants with blue or purple berries. They are classified in the section Cyanococcus within the genus Vaccinium. Vaccinium also includes cranberries, bilberries, huckleberries, and Madeira blueberries. Commercial blueberries—both wild (lowbush) and cultivated (highbush)—are all native to North America. The highbush varieties were introduced into Europe during the 1930s. Blueberries are usually prostrate shrubs that can vary in size from 10 centimeters (4 inches) to 4 meters (13 feet) in height. In the commercial production of blueberries, the species with small, pea-size berries growing on low-level bushes are known as "lowbush blueberries" (synonymous with "wild"), while the species with larger berries growing on taller, cultivated bushes are known as "highbush blueberries". Canada is the leading producer of lowbush blueberries, while the United States produces some 40% of the world s supply of highbush blueberries. ### Content "The dataset used for predictive modeling was generated by the Wild Blueberry Pollination Simulation Model, which is an open-source, spatially-explicit computer simulation program that enables exploration of how various factors, including plant spatial arrangement, outcrossing and self-pollination, bee species compositions and weather conditions, in isolation and combination, affect pollination efficiency and yield of the wild blueberry agroecosystem. The simulation model has been validated by the field observation and experimental data collected in Maine USA and Canadian Maritimes during the last 30 years and now is a useful tool for hypothesis testing and theory development for wild blueberry pollination researches." Features Unit Description Clonesize m2 The average blueberry clone size in the field Honeybee bees/m2/min Honeybee density in the field Bumbles bees/m2/min Bumblebee density in the field Andrena bees/m2/min Andrena bee density in the field Osmia bees/m2/min Osmia bee density in the field MaxOfUpperTRange ℃ The highest record of the upper band daily air temperature during the bloom season MinOfUpperTRange ℃ The lowest record of the upper band daily air temperature AverageOfUpperTRange ℃ The average of the upper band daily air temperature MaxOfLowerTRange ℃ The highest record of the lower band daily air temperature MinOfLowerTRange ℃ The lowest record of the lower band daily air temperature AverageOfLowerTRange ℃ The average of the lower band daily air temperature RainingDays Day The total number of days during the bloom season, each of which has precipitation larger than zero AverageRainingDays Day The average of raining days of the entire bloom season Kaggle dataset identifier: wild-blueberry-yield-prediction-dataset <jupyter_script># # 📈 EDA import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df_train = pd.read_csv( "/kaggle/input/playground-series-s3e14/train.csv", index_col="id" ) df_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv", index_col="id") df_train df_train.info() # All features are numerical and the data is clean so there is no need for data imputation, type casting, etc. . 
plt.figure(figsize=(16, 5)) sns.heatmap(df_train.corr(), cmap="crest", annot=True, fmt=".2f") plt.show() # In the first look, "fruitset", "fruitmass", "seeds", "clonesize", "RainingDays", and "AverageRainingDays" are highly correlated with the target column which is "yield" so we can use these features directly. # It is imporant to note that three former ones are highly correlated. # Also it is possible to group the other features into two groups: # * Bee Features: number of different bee species per m2. # * Temperature Feautres: Max, min, and mean of high and low temperatures per day. # We could do some feature engineering and try to combine features in each group. pp = sns.pairplot( data=df_train, y_vars=["yield"], x_vars=["fruitset", "fruitmass", "seeds"] ) df_train.describe() # The feautres has various ranges so it is important to normalize them. # # 🎣 Feature Engineering from scipy import stats from sklearn.metrics import mean_absolute_error (np.abs(stats.zscore(df_train)) > 3).sum(axis=0) df_train = df_train[(np.abs(stats.zscore(df_train)) < 3).all(axis=1)].reset_index( drop=True ) df_train.shape df_train = df_train.drop_duplicates().reset_index(drop=True) df_train.shape df_train["fruit_seed"] = df_train["fruitset"] * df_train["seeds"] df_test["fruit_seed"] = df_test["fruitset"] * df_test["seeds"] to_cat = ["clonesize", "honeybee", "bumbles", "andrena", "osmia", "MaxOfUpperTRange"] for cat in to_cat: unique_values = np.unique( np.concatenate((df_train[cat].unique(), df_test[cat].unique())) ) df_train[cat] = pd.Categorical( df_train[cat], categories=unique_values, ordered=True ) df_test[cat] = pd.Categorical(df_test[cat], categories=unique_values, ordered=True) df_train[cat] = df_train[cat].cat.codes df_test[cat] = df_test[cat].cat.codes columns = ["fruitset", "fruitmass", "seeds", "fruit_seed", "MaxOfUpperTRange"] df_train = df_train[columns + ["yield"]] df_test = df_test[columns] X_train = df_train.drop(["yield"], axis=1) y_train = df_train["yield"] X_test = df_test # # 🎢 Model Selection with PyCaret from pycaret.regression import RegressionExperiment s = RegressionExperiment() s.setup(X_train, target=y_train, session_id=42, fold=5) best = s.compare_models() # s.plot_model(best, plot = 'residuals') # # Candidate Model Tuning from sklearn.model_selection import KFold def postprocessor(prediction): unique_targets = np.unique(df_train["yield"]) return [min(unique_targets, key=lambda x: abs(x - pred)) for pred in prediction] def cross_validate_kfold(model, X, y, X_test, label): n_splits = 5 kf = KFold(n_splits=n_splits, shuffle=True, random_state=42) train_predictions = np.zeros_like(y) valid_predictions = np.zeros_like(y) train_mae, valid_mae = [], [] preds = np.zeros((len(X_test))) for fold, (train_idx, valid_idx) in enumerate(kf.split(X, y)): X_train = X.iloc[train_idx, :] X_valid = X.iloc[valid_idx, :] model.fit(X_train, y[train_idx]) train_preds = model.predict(X_train) valid_preds = model.predict(X_valid) train_predictions[train_idx] += postprocessor(model.predict(X_train)) valid_predictions[valid_idx] += postprocessor(model.predict(X_valid)) train_mae.append(mean_absolute_error(train_preds, y[train_idx])) valid_mae.append(mean_absolute_error(valid_preds, y[valid_idx])) preds += model.predict(X_test) print( f"{label}=> Val MAE: {np.mean(valid_mae):.5f} ± {np.std(valid_mae):.5f} | Train MAE: {np.mean(train_mae):.5f} ± {np.std(train_mae):.5f}" ) return preds / n_splits # # Blending Models from catboost import CatBoostRegressor from lightgbm import LGBMRegressor # Some 
hyperparameters comes from these great notebooks. # https://www.kaggle.com/code/chayaphatnicrothanon/lb-score-338-63-eda-catboost-lightgbm-kfolds#Model # https://www.kaggle.com/code/zhukovoleksiy/ps-s3e14-simple-eda-ensemble # https://www.kaggle.com/code/arunklenin/ps3e14-eda-transformers-ensemble#6.-Modeling n_estimators = 200 device = "gpu" random_state = 42 lgb_params0 = { "boosting_type": "gbdt", "class_weight": None, "colsample_bytree": 0.8, "importance_type": "split", "learning_rate": 0.04, "max_depth": -1, "min_child_samples": 20, "min_child_weight": 0.001, "min_split_gain": 0.0, "n_estimators": 3000, "n_jobs": -1, "num_leaves": 31, "objective": "regression_l1", "random_state": None, "reg_alpha": 0.0, "reg_lambda": 0.0, "silent": True, "verbose": -100, "subsample": 0.7, "subsample_for_bin": 200000, "subsample_freq": 0, "max_bin": 1000, "bagging_freq": 1, "metric": "mae", } lgb_params1 = { "n_estimators": n_estimators, "num_leaves": 93, "min_child_samples": 20, "learning_rate": 0.05533790147941807, "colsample_bytree": 0.8809128870084636, "reg_alpha": 0.0009765625, "reg_lambda": 0.015589408048174165, "objective": "regression_l1", "metric": "mean_absolute_error", "boosting_type": "gbdt", "random_state": random_state, } lgb_params2 = { "n_estimators": n_estimators, "num_leaves": 45, "max_depth": 13, "learning_rate": 0.0684383311038932, "subsample": 0.5758412171285148, "colsample_bytree": 0.8599714680300794, "reg_lambda": 1.597717830931487e-08, "objective": "regression_l1", "metric": "mean_absolute_error", "boosting_type": "gbdt", "random_state": random_state, "force_col_wise": True, } cat_features = np.where(X_train.dtypes != np.float)[0] cb_params = { "iterations": 3000, "loss_function": "MAE", "random_state": 69, "early_stopping_rounds": 1000, "cat_features": cat_features, } preds = [] models = [ ("lgbm0", LGBMRegressor(**lgb_params0)), ("lgbm1", LGBMRegressor(**lgb_params1)), ("lgbm2", LGBMRegressor(**lgb_params2)), ("cb", CatBoostRegressor(silent=True, **cb_params)), ] for label, model in models: preds.append(cross_validate_kfold(model, X_train, y_train, X_test, label)) preds_array = np.array(preds) final_preds = postprocessor(preds_array.mean(axis=0)) submission = pd.DataFrame( { "id": X_test.index, "yield": final_preds, } ) submission.to_csv("submission.csv", index=False)
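# # Added note (not part of the original notebook)
# The postprocessor above snaps every prediction to the nearest target value observed
# in training. A hedged, vectorized sketch of the same idea with numpy; the function
# and variable names here are illustrative, not from the notebook.
import numpy as np


def snap_to_targets(preds, targets):
    targets = np.sort(np.unique(targets))
    idx = np.clip(np.searchsorted(targets, preds), 1, len(targets) - 1)
    left, right = targets[idx - 1], targets[idx]
    # keep whichever neighbour is closer to the raw prediction
    return np.where(np.abs(preds - left) <= np.abs(right - preds), left, right)


# toy usage: 3001.2 snaps to 3000.0 and 4999.9 snaps to 5000.0
print(snap_to_targets(np.array([3001.2, 4999.9]), np.array([3000.0, 3200.0, 5000.0])))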
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/654/129654468.ipynb
wild-blueberry-yield-prediction-dataset
shashwatwork
[{"Id": 129654468, "ScriptId": 38018581, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7932415, "CreationDate": "05/15/2023 14:04:15", "VersionNumber": 10.0, "Title": "\ud83c\udf47 Wild Blueberry Yield", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 226.0, "LinesInsertedFromPrevious": 122.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 104.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185950004, "KernelVersionId": 129654468, "SourceDatasetVersionId": 2462316}]
[{"Id": 2462316, "DatasetId": 1490445, "DatasourceVersionId": 2504743, "CreatorUserId": 1444085, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "07/25/2021 17:48:21", "VersionNumber": 2.0, "Title": "Wild blueberry Yield Prediction Dataset", "Slug": "wild-blueberry-yield-prediction-dataset", "Subtitle": "Predict the yield of Wild Blueberry", "Description": "### Context\n\nBlueberries are perennial flowering plants with blue or purple berries. They are classified in the section Cyanococcus within the genus Vaccinium. Vaccinium also includes cranberries, bilberries, huckleberries, and Madeira blueberries. Commercial blueberries\u2014both wild (lowbush) and cultivated (highbush)\u2014are all native to North America. The highbush varieties were introduced into Europe during the 1930s.\n\nBlueberries are usually prostrate shrubs that can vary in size from 10 centimeters (4 inches) to 4 meters (13 feet) in height. In the commercial production of blueberries, the species with small, pea-size berries growing on low-level bushes are known as \"lowbush blueberries\" (synonymous with \"wild\"), while the species with larger berries growing on taller, cultivated bushes are known as \"highbush blueberries\". Canada is the leading producer of lowbush blueberries, while the United States produces some 40% of the world s supply of highbush blueberries.\n\n### Content\n\n\"The dataset used for predictive modeling was generated by the Wild Blueberry Pollination Simulation Model, which is an open-source, spatially-explicit computer simulation program that enables exploration of how various factors, including plant spatial arrangement, outcrossing and self-pollination, bee species compositions and weather conditions, in isolation and combination, affect pollination efficiency and yield of the wild blueberry agroecosystem. 
The simulation model has been validated by the field observation and experimental data collected in Maine USA and Canadian Maritimes during the last 30 years and now is a useful tool for hypothesis testing and theory development for wild blueberry pollination researches.\"\n\nFeatures \tUnit\tDescription\nClonesize\tm2\tThe average blueberry clone size in the field\nHoneybee\tbees/m2/min\tHoneybee density in the field\nBumbles\tbees/m2/min\tBumblebee density in the field\nAndrena\tbees/m2/min\tAndrena bee density in the field\nOsmia\tbees/m2/min\tOsmia bee density in the field\nMaxOfUpperTRange\t\u2103\tThe highest record of the upper band daily air temperature during the bloom season\nMinOfUpperTRange\t\u2103\tThe lowest record of the upper band daily air temperature\nAverageOfUpperTRange\t\u2103\tThe average of the upper band daily air temperature\nMaxOfLowerTRange\t\u2103\tThe highest record of the lower band daily air temperature\nMinOfLowerTRange\t\u2103\tThe lowest record of the lower band daily air temperature\nAverageOfLowerTRange\t\u2103\tThe average of the lower band daily air temperature\nRainingDays\tDay\tThe total number of days during the bloom season, each of which has precipitation larger than zero\nAverageRainingDays\tDay\tThe average of raining days of the entire bloom season\n\n### Acknowledgements\n\nQu, Hongchun; Obsie, Efrem; Drummond, Frank (2020), \u201cData for: Wild blueberry yield prediction using a combination of computer simulation and machine learning algorithms\u201d, Mendeley Data, V1, doi: 10.17632/p5hvjzsvn8.1\n\nDataset is outsourced from [here.](https://data.mendeley.com/datasets/p5hvjzsvn8/1)", "VersionNotes": "updated", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1490445, "CreatorUserId": 1444085, "OwnerUserId": 1444085.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2462316.0, "CurrentDatasourceVersionId": 2504743.0, "ForumId": 1510148, "Type": 2, "CreationDate": "07/25/2021 17:47:00", "LastActivityDate": "07/25/2021", "TotalViews": 11876, "TotalDownloads": 1130, "TotalVotes": 48, "TotalKernels": 82}]
[{"Id": 1444085, "UserName": "shashwatwork", "DisplayName": "Shashwat Tiwari", "RegisterDate": "11/24/2017", "PerformanceTier": 2}]
false
2
2,458
2
3,194
2,458
129654529
# # ⛴ Titanic | Advanced EDA & Prediction with HP Tuning ⛴ # # ☀ Import Libraries ☀ # Classic Libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Advanced Visualization Libraries from plotly import graph_objs as go import plotly.express as px import plotly.figure_factory as ff from plotly.subplots import make_subplots from plotly.offline import init_notebook_mode, iplot init_notebook_mode(connected=True) # enables plotly plots to be displayed in notebook cmap1 = "gist_gray" # Models from lightgbm import LGBMClassifier from sklearn.decomposition import PCA from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import BernoulliNB from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from catboost import CatBoostClassifier from xgboost import XGBClassifier from sklearn.ensemble import AdaBoostClassifier # Metrics, Preprocessing and Tuning Tools from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import RobustScaler from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from sklearn.metrics import auc from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.metrics import f1_score from sklearn.metrics import roc_curve # from sklearn.metrics import plot_roc_curve from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.impute import KNNImputer from sklearn.preprocessing import MinMaxScaler import missingno as msno from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV from pandas_profiling import ProfileReport # Customization import warnings warnings.filterwarnings("ignore") from termcolor import colored # # # ⇣ Load and Check Data ⇣ train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv") df_train = train.copy() df_test = test.copy() def check_data(df): print(80 * "*") print("DIMENSION: ({}, {})".format(df.shape[0], df.shape[1])) print(80 * "*") print("COLUMNS:\n") print(df.columns.values) print(80 * "*") print("DATA INFO:\n") print(df.dtypes) print(80 * "*") print("MISSING VALUES:\n") print(df.isnull().sum()) print(80 * "*") print("NUMBER OF UNIQUE VALUES:\n") print(df.nunique()) check_data(df_train) check_data(df_test) def grab_col_names(dataframe, cat_th=10, car_th=20): # cat_cols, cat_but_car cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"] num_but_cat = [ col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O" ] cat_but_car = [ col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O" ] cat_cols = cat_cols + num_but_cat cat_cols = [col for col in cat_cols if col not in cat_but_car] # num_cols num_cols = [col for col in dataframe.columns if 
dataframe[col].dtypes != "O"] num_cols = [col for col in num_cols if col not in num_but_cat] print(" RESULT ".center(50, "-")) print(f"Observations: {dataframe.shape[0]}") print(f"Variables: {dataframe.shape[1]}") print(f"cat_cols: {len(cat_cols)}") print(f"num_cols: {len(num_cols)}") print(f"cat_but_car: {len(cat_but_car)}") print(f"num_but_cat: {len(num_but_cat)}") print("".center(50, "-")) return cat_cols, num_cols, cat_but_car cat_cols_train, num_cols_train, cat_but_car_train = grab_col_names(df_train) cat_cols_test, num_cols_test, cat_but_car_test = grab_col_names(df_test) def descriptive_stats(df): desc = df.describe().T desc_df = pd.DataFrame(index=df.columns, columns=desc.columns, data=desc) f, ax = plt.subplots(figsize=(18, 8)) sns.heatmap( desc, annot=True, cmap=cmap1, fmt=".2f", ax=ax, linecolor="black", linewidths=1.5, cbar=False, annot_kws={"size": 15}, ) plt.xticks(size=15) plt.yticks(size=15, rotation=0) plt.title("Descriptive Statistics", size=15) plt.show() descriptive_stats(df_train) descriptive_stats(df_test) df_train.profile_report() df_test.profile_report() # # # ⚡ Exploratory Data Analysis ⚡ def tar_var_summary(df, tar_var): colors = [ "#a2b9bc", "#6b5b95", "#b2ad7f", "#feb236", "#b5e7a0", "#878f99", "#d64161", "#86af49", "#ff7b25", ] fig = make_subplots( rows=1, cols=2, subplot_titles=("Countplot", "Percentages"), specs=[[{"type": "xy"}, {"type": "domain"}]], ) x = [str(i) for i in df[tar_var].value_counts().index] y = df[tar_var].value_counts().values.tolist() fig.add_trace( go.Bar( x=x, y=y, text=y, textposition="auto", showlegend=False, marker=dict(color=colors, line=dict(color="black", width=2)), ), row=1, col=1, ) fig.add_trace( go.Pie( labels=df[tar_var].value_counts().keys(), values=df[tar_var].value_counts().values, pull=[0, 0.25], hoverinfo="label", textinfo="percent", textfont_size=20, textposition="auto", marker=dict(colors=colors, line=dict(color="black", width=2)), ), row=1, col=2, ) fig.update_layout( title={ "text": "Distribution of the Target Variable", "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, template="plotly_dark", ) iplot(fig) tar_var_summary(df_train, "Survived") def num_var_summary(df, num_var): fig = make_subplots(rows=1, cols=2, subplot_titles=("Quantiles", "Distribution")) fig.add_trace( go.Box( y=df[num_var], name=str(num_var), showlegend=False, marker_color="#A6D0DD" ), row=1, col=1, ) fig.add_trace( go.Histogram( x=df[num_var], xbins=dict(start=df[num_var].min(), end=df[num_var].max()), showlegend=False, name=str(num_var), marker=dict(color="#0A4D68", line=dict(color="#DBE6EC", width=1)), ), row=1, col=2, ) fig.update_layout( title={ "text": num_var.capitalize(), "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, template="plotly_dark", ) iplot(fig) for i in num_cols_train: if i != "PassengerId": num_var_summary(df_train, i) def num_features(df, num_var, tar_var): x0 = df[df[tar_var] == 0][num_var] x1 = df[df[tar_var] == 1][num_var] trace1 = go.Histogram( x=x0, name="0", opacity=0.75, marker=dict(color="#0A4D68", line=dict(color="#DBE6EC", width=1)), ) trace2 = go.Histogram( x=x1, name="1", opacity=0.75, marker=dict(color="#A6D0DD", line=dict(color="#DBE6EC", width=1)), ) data = [trace1, trace2] layout = go.Layout( title={ "text": num_var, "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, barmode="overlay", yaxis=dict(title="Count"), template="plotly_dark", ) fig = go.Figure(data=data, layout=layout) iplot(fig) for i in num_cols_train: if i != "PassengerId": num_features(df_train, i, "Survived") 
def df_corr(df): plt.figure(figsize=(9, 6)) corr = df.corr() matrix = np.triu(corr) sns.heatmap(corr, annot=True, mask=matrix, cmap="gist_gray") df_corr(df_train) colors = ["#654E92", "#6C9BCF", "#A5C0DD", "#EBD8B2"] fig = go.Figure( data=go.Splom( dimensions=[ dict(label=col, values=df_train[col]) for col in df_train[num_cols_train] .select_dtypes(include=["int", "float"]) .columns ], showupperhalf=True, text=df_train["Survived"], marker=dict( color=[ colors[i] for i in df_train["Survived"].astype("category").cat.codes ], showscale=False, opacity=0.65, ), ) ) fig.update_layout( title={ "text": "Pairwise Relationships by Survived", "xanchor": "center", "yanchor": "top", "x": 0.5, "y": 0.95, }, width=950, height=950, template="plotly_dark", ) iplot(fig) def detect_outliers(df, num_var): trace0 = go.Box( y=df[num_var], name="All Points", jitter=0.3, pointpos=-1.8, boxpoints="all", marker=dict(color="#a2b9bc"), line=dict(color="#6b5b95"), ) trace1 = go.Box( y=df[num_var], name="Only Whiskers", boxpoints=False, marker=dict(color="#b2ad7f"), line=dict(color="#feb236"), ) trace2 = go.Box( y=df[num_var], name="Suspected Outliers", boxpoints="suspectedoutliers", marker=dict( color="#b5e7a0", outliercolor="#878f99", line=dict(outliercolor="#d64161", outlierwidth=2), ), line=dict(color="#86af49"), ) trace3 = go.Box( y=df[num_var], name="Whiskers and Outliers", boxpoints="outliers", marker=dict(color="#6b5b95"), line=dict(color="#ff7b25"), ) data = [trace0, trace1, trace2, trace3] layout = go.Layout(title="{} Outliers".format(num_var)) layout = go.Layout( title={ "text": num_var, "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, barmode="overlay", yaxis=dict(title="Count"), template="plotly_dark", ) fig = go.Figure(data=data, layout=layout) iplot(fig) for i in num_cols_train: if i != "PassengerId": detect_outliers(df_train, i) def cat_var_summary(df, cat_var): colors = [ "#a2b9bc", "#6b5b95", "#b2ad7f", "#feb236", "#b5e7a0", "#878f99", "#d64161", "#86af49", "#ff7b25", ] fig = make_subplots( rows=1, cols=2, subplot_titles=("Countplot", "Percentages"), specs=[[{"type": "xy"}, {"type": "domain"}]], ) x = [str(i) for i in df[cat_var].value_counts().index] y = df[cat_var].value_counts().values.tolist() fig.add_trace( go.Bar( x=x, y=y, text=y, textposition="auto", showlegend=False, marker=dict(color=colors, line=dict(color="black", width=2)), ), row=1, col=1, ) fig.add_trace( go.Pie( labels=df[cat_var].value_counts().keys(), values=df[cat_var].value_counts().values, hoverinfo="label", textinfo="percent", textfont_size=20, textposition="auto", marker=dict(colors=colors, line=dict(color="black", width=2)), ), row=1, col=2, ) fig.update_layout( title={ "text": cat_var, "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, template="plotly_dark", ) iplot(fig) for i in cat_cols_train: cat_var_summary(df_train, i) df_train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean().sort_values( by="Survived", ascending=False ) # # # Data Preprocessing df_train["Age"].fillna(df_train["Age"].median(skipna=True), inplace=True) df_train["Embarked"].fillna(df_train["Embarked"].value_counts().idxmax(), inplace=True) df_train.drop("Cabin", 
axis=1, inplace=True) df_train.head(10) fig = plt.figure(figsize=(9, 6)) sns.distplot(train["Age"], color="red") sns.distplot(df_train["Age"], color="blue") fig.legend(labels=["Age with Missing Values", "Age with Adjusted Values"]) df_train["TravelAlone"] = np.where((df_train["SibSp"] + df_train["Parch"]) > 0, 0, 1) df_train.drop("SibSp", axis=1, inplace=True) df_train.drop("Parch", axis=1, inplace=True) df_train.head() training = pd.get_dummies(df_train, columns=["Pclass", "Embarked", "Sex"]) training.drop("Sex_female", axis=1, inplace=True) training.drop("PassengerId", axis=1, inplace=True) training.drop("Name", axis=1, inplace=True) training.drop("Ticket", axis=1, inplace=True) final_train = training final_train.head() test_data = df_test.copy() test_data["Age"].fillna(df_train["Age"].median(skipna=True), inplace=True) test_data["Fare"].fillna(df_train["Fare"].median(skipna=True), inplace=True) test_data.drop("Cabin", axis=1, inplace=True) test_data["TravelAlone"] = np.where((test_data["SibSp"] + test_data["Parch"]) > 0, 0, 1) test_data.drop("SibSp", axis=1, inplace=True) test_data.drop("Parch", axis=1, inplace=True) testing = pd.get_dummies(test_data, columns=["Pclass", "Embarked", "Sex"]) testing.drop("Sex_female", axis=1, inplace=True) testing.drop("PassengerId", axis=1, inplace=True) testing.drop("Name", axis=1, inplace=True) testing.drop("Ticket", axis=1, inplace=True) final_test = testing final_test.head() plt.figure(figsize=(9, 6)) ax = sns.kdeplot( final_train["Age"][final_train.Survived == 1], color="darkturquoise", shade=True ) sns.kdeplot( final_train["Age"][final_train.Survived == 0], color="lightcoral", shade=True ) plt.legend(["Survived", "Died"]) plt.title("Density Plot of Age for Surviving Population and Deceased Population") ax.set(xlabel="Age") plt.xlim(-10, 85) plt.show() plt.figure(figsize=(12, 7)) avg_survival_byage = ( final_train[["Age", "Survived"]].groupby(["Age"], as_index=False).mean() ) g = sns.barplot(x="Age", y="Survived", data=avg_survival_byage, color="LightSeaGreen") plt.show() final_train["IsMinor"] = np.where(final_train["Age"] <= 16, 1, 0) final_test["IsMinor"] = np.where(final_test["Age"] <= 16, 1, 0) plt.figure(figsize=(9, 6)) ax = sns.kdeplot( final_train["Fare"][final_train.Survived == 1], color="darkturquoise", shade=True ) sns.kdeplot( final_train["Fare"][final_train.Survived == 0], color="lightcoral", shade=True ) plt.legend(["Survived", "Died"]) plt.title("Density Plot of Fare for Surviving Population and Deceased Population") ax.set(xlabel="Fare") plt.xlim(-20, 200) plt.show() # y = final_train["Survived"] # X = final_train.drop(["Survived"], axis=1) cat_cols_train, num_cols_train, cat_but_car_train = grab_col_names(final_train) rs = RobustScaler() final_train[num_cols_train] = rs.fit_transform(final_train[num_cols_train]) y = final_train["Survived"] X = final_train.drop(["Survived"], axis=1) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.20, random_state=42 ) # # # Models def model_performance(model): y_pred = model.fit(X_train, y_train).predict(X_test) fig = make_subplots(rows=1, cols=2, subplot_titles=("Confusion Matrix", "Metrics")) confusion = confusion_matrix(y_test, y_pred) tp = confusion[1, 1] fn = confusion[1, 0] fp = confusion[0, 1] tn = confusion[0, 0] accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) recall = tp / (tp + fn) f1_score = 2 * ( ((tp / (tp + fp)) * (tp / (tp + fn))) / ((tp / (tp + fp)) + (tp / (tp + fn))) ) colors = ["#93e4c1", "#3baea0", "#118a7e", "#1f6f78"] show_metrics = 
pd.DataFrame(data=[[accuracy, precision, recall, f1_score]]) show_metrics = show_metrics.T fig.add_trace( go.Heatmap( z=confusion, x=["0 (pred)", "1 (pred)"], y=["0 (true)", "1 (true)"], xgap=2, ygap=2, colorscale="darkmint", showscale=False, ), row=1, col=1, ) fig.add_trace( go.Bar( x=(show_metrics[0].values), y=["Accuracy", "Precision", "Recall", "F1_score"], text=np.round_(show_metrics[0].values, 4), textposition="auto", textfont=dict(color="white"), orientation="h", opacity=1, marker=dict(color=colors, line=dict(color="white", width=1.5)), ), row=1, col=2, ) fig.update_layout( title={ "text": model.__class__.__name__, "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, template="plotly_dark", ) iplot(fig) random_state = 42 models = [ GaussianNB(), DecisionTreeClassifier(random_state=random_state), SVC(random_state=random_state, probability=True), RandomForestClassifier(random_state=random_state), LogisticRegression(random_state=random_state), KNeighborsClassifier(), GradientBoostingClassifier(random_state=random_state), CatBoostClassifier(random_state=random_state), XGBClassifier(random_state=random_state), LGBMClassifier(random_state=random_state), AdaBoostClassifier(random_state=random_state), ] for model in models: model_performance(model) # # # Hyperparameter Tuning SEED = 42 cross_valid_scores = {} # Decision Tree parameters = { "max_depth": [3, 5, 7, 9, 11, 13], } model_desicion_tree = DecisionTreeClassifier( random_state=SEED, class_weight="balanced", ) model_desicion_tree = GridSearchCV( model_desicion_tree, parameters, cv=5, scoring="accuracy", ) model_desicion_tree.fit(X_train, y_train) print("-----") print(f"Best parameters {model_desicion_tree.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_desicion_tree.best_score_:.3f}" ) cross_valid_scores["desicion_tree"] = model_desicion_tree.best_score_ print("-----") # Random Forest parameters = { "n_estimators": [5, 10, 15, 20, 25], "max_depth": [3, 5, 7, 9, 11, 13], } model_random_forest = RandomForestClassifier( random_state=SEED, class_weight="balanced", ) model_random_forest = GridSearchCV( model_random_forest, parameters, cv=5, scoring="accuracy", ) model_random_forest.fit(X_train, y_train) print("-----") print(f"Best parameters {model_random_forest.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_random_forest.best_score_:.3f}" ) cross_valid_scores["random_forest"] = model_random_forest.best_score_ print("-----") # Adaboost parameters = { "n_estimators": [5, 10, 15, 20, 25, 50, 75, 100], "learning_rate": [0.001, 0.01, 0.1, 1.0], } model_adaboost = AdaBoostClassifier( random_state=SEED, ) model_adaboost = GridSearchCV( model_adaboost, parameters, cv=5, scoring="accuracy", ) model_adaboost.fit(X_train, y_train) print("-----") print(f"Best parameters {model_adaboost.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_adaboost.best_score_:.3f}" ) cross_valid_scores["ada_boost"] = model_adaboost.best_score_ print("-----") # Xgboost parameters = { "max_depth": [3, 5, 7, 9], "n_estimators": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], } model_xgb = XGBClassifier( random_state=SEED, ) model_xgb = GridSearchCV( model_xgb, parameters, cv=5, scoring="accuracy", ) model_xgb.fit(X_train, y_train) print("-----") print(f"Best parameters {model_xgb.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_xgb.best_score_:.3f}" ) 
cross_valid_scores["xgboost"] = model_xgb.best_score_ print("-----") # LightGBM cat_cols = [ "TravelAlone", "Pclass_1", "Pclass_2", "Pclass_3", "Embarked_C", "Embarked_S", "Sex_male", "IsMinor", ] parameters = { "n_estimators": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], "num_leaves": [7, 15, 31], } model_lgbm = LGBMClassifier( random_state=SEED, class_weight="balanced", ) model_lgbm = GridSearchCV( model_lgbm, parameters, cv=5, scoring="accuracy", ) model_lgbm.fit(X_train, y_train, categorical_feature=cat_cols) print("-----") print(f"Best parameters {model_lgbm.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_lgbm.best_score_:.3f}" ) cross_valid_scores["lightgbm"] = model_lgbm.best_score_ print("-----") # CatBoost parameters = { "iterations": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], "depth": [3, 5, 7, 9, 11, 13], } model_catboost = CatBoostClassifier( verbose=False, ) model_catboost = GridSearchCV( model_catboost, parameters, cv=5, scoring="accuracy", ) model_catboost.fit(X_train, y_train) print("-----") print(f"Best parameters {model_catboost.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_catboost.best_score_:.3f}" ) cross_valid_scores["catboost"] = model_catboost.best_score_ print("-----") # Logistic Regression parameters = {"C": [0.001, 0.01, 0.1, 1.0], "penalty": ["l1", "l2"]} model_logistic_regression = LogisticRegression( random_state=SEED, class_weight="balanced", solver="liblinear", ) model_logistic_regression = GridSearchCV( model_logistic_regression, parameters, cv=5, scoring="accuracy", ) model_logistic_regression.fit(X_train, y_train) print("-----") print(f"Best parameters {model_logistic_regression.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_logistic_regression.best_score_:.3f}" ) cross_valid_scores["logistic_regression"] = model_logistic_regression.best_score_ print("-----") # SVC parameters = { "C": [0.001, 0.01, 0.1, 1.0], "kernel": ["linear", "poly", "rbf", "sigmoid"], "gamma": ["scale", "auto"], } model_svc = SVC( random_state=SEED, class_weight="balanced", probability=True, ) model_svc = GridSearchCV( model_svc, parameters, cv=5, scoring="accuracy", ) model_svc.fit(X_train, y_train) print("-----") print(f"Best parameters {model_svc.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_svc.best_score_:.3f}" ) cross_valid_scores["svc"] = model_svc.best_score_ print("-----") # KNN parameters = { "weights": ["uniform", "distance"], } model_k_neighbors = KNeighborsClassifier() model_k_neighbors = GridSearchCV( model_k_neighbors, parameters, cv=5, scoring="accuracy", ) model_k_neighbors.fit(X_train, y_train) print("-----") print(f"Best parameters {model_k_neighbors.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_k_neighbors.best_score_:.3f}" ) cross_valid_scores["k_neighbors"] = model_k_neighbors.best_score_ print("-----")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/654/129654529.ipynb
null
null
[{"Id": 129654529, "ScriptId": 38367800, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6432985, "CreationDate": "05/15/2023 14:04:34", "VersionNumber": 1.0, "Title": "\u26f4 Titanic|Advanced EDA & Prediction with HP Tuning", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 846.0, "LinesInsertedFromPrevious": 846.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 25}]
null
null
null
null
pd.DataFrame(data=[[accuracy, precision, recall, f1_score]]) show_metrics = show_metrics.T fig.add_trace( go.Heatmap( z=confusion, x=["0 (pred)", "1 (pred)"], y=["0 (true)", "1 (true)"], xgap=2, ygap=2, colorscale="darkmint", showscale=False, ), row=1, col=1, ) fig.add_trace( go.Bar( x=(show_metrics[0].values), y=["Accuracy", "Precision", "Recall", "F1_score"], text=np.round_(show_metrics[0].values, 4), textposition="auto", textfont=dict(color="white"), orientation="h", opacity=1, marker=dict(color=colors, line=dict(color="white", width=1.5)), ), row=1, col=2, ) fig.update_layout( title={ "text": model.__class__.__name__, "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, template="plotly_dark", ) iplot(fig) random_state = 42 models = [ GaussianNB(), DecisionTreeClassifier(random_state=random_state), SVC(random_state=random_state, probability=True), RandomForestClassifier(random_state=random_state), LogisticRegression(random_state=random_state), KNeighborsClassifier(), GradientBoostingClassifier(random_state=random_state), CatBoostClassifier(random_state=random_state), XGBClassifier(random_state=random_state), LGBMClassifier(random_state=random_state), AdaBoostClassifier(random_state=random_state), ] for model in models: model_performance(model) # # # Hyperparameter Tuning SEED = 42 cross_valid_scores = {} # Decision Tree parameters = { "max_depth": [3, 5, 7, 9, 11, 13], } model_desicion_tree = DecisionTreeClassifier( random_state=SEED, class_weight="balanced", ) model_desicion_tree = GridSearchCV( model_desicion_tree, parameters, cv=5, scoring="accuracy", ) model_desicion_tree.fit(X_train, y_train) print("-----") print(f"Best parameters {model_desicion_tree.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_desicion_tree.best_score_:.3f}" ) cross_valid_scores["desicion_tree"] = model_desicion_tree.best_score_ print("-----") # Random Forest parameters = { "n_estimators": [5, 10, 15, 20, 25], "max_depth": [3, 5, 7, 9, 11, 13], } model_random_forest = RandomForestClassifier( random_state=SEED, class_weight="balanced", ) model_random_forest = GridSearchCV( model_random_forest, parameters, cv=5, scoring="accuracy", ) model_random_forest.fit(X_train, y_train) print("-----") print(f"Best parameters {model_random_forest.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_random_forest.best_score_:.3f}" ) cross_valid_scores["random_forest"] = model_random_forest.best_score_ print("-----") # Adaboost parameters = { "n_estimators": [5, 10, 15, 20, 25, 50, 75, 100], "learning_rate": [0.001, 0.01, 0.1, 1.0], } model_adaboost = AdaBoostClassifier( random_state=SEED, ) model_adaboost = GridSearchCV( model_adaboost, parameters, cv=5, scoring="accuracy", ) model_adaboost.fit(X_train, y_train) print("-----") print(f"Best parameters {model_adaboost.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_adaboost.best_score_:.3f}" ) cross_valid_scores["ada_boost"] = model_adaboost.best_score_ print("-----") # Xgboost parameters = { "max_depth": [3, 5, 7, 9], "n_estimators": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], } model_xgb = XGBClassifier( random_state=SEED, ) model_xgb = GridSearchCV( model_xgb, parameters, cv=5, scoring="accuracy", ) model_xgb.fit(X_train, y_train) print("-----") print(f"Best parameters {model_xgb.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_xgb.best_score_:.3f}" ) 
cross_valid_scores["xgboost"] = model_xgb.best_score_ print("-----") # LightGBM cat_cols = [ "TravelAlone", "Pclass_1", "Pclass_2", "Pclass_3", "Embarked_C", "Embarked_S", "Sex_male", "IsMinor", ] parameters = { "n_estimators": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], "num_leaves": [7, 15, 31], } model_lgbm = LGBMClassifier( random_state=SEED, class_weight="balanced", ) model_lgbm = GridSearchCV( model_lgbm, parameters, cv=5, scoring="accuracy", ) model_lgbm.fit(X_train, y_train, categorical_feature=cat_cols) print("-----") print(f"Best parameters {model_lgbm.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_lgbm.best_score_:.3f}" ) cross_valid_scores["lightgbm"] = model_lgbm.best_score_ print("-----") # CatBoost parameters = { "iterations": [5, 10, 15, 20, 25, 50, 100], "learning_rate": [0.01, 0.05, 0.1], "depth": [3, 5, 7, 9, 11, 13], } model_catboost = CatBoostClassifier( verbose=False, ) model_catboost = GridSearchCV( model_catboost, parameters, cv=5, scoring="accuracy", ) model_catboost.fit(X_train, y_train) print("-----") print(f"Best parameters {model_catboost.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_catboost.best_score_:.3f}" ) cross_valid_scores["catboost"] = model_catboost.best_score_ print("-----") # Logistic Regression parameters = {"C": [0.001, 0.01, 0.1, 1.0], "penalty": ["l1", "l2"]} model_logistic_regression = LogisticRegression( random_state=SEED, class_weight="balanced", solver="liblinear", ) model_logistic_regression = GridSearchCV( model_logistic_regression, parameters, cv=5, scoring="accuracy", ) model_logistic_regression.fit(X_train, y_train) print("-----") print(f"Best parameters {model_logistic_regression.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_logistic_regression.best_score_:.3f}" ) cross_valid_scores["logistic_regression"] = model_logistic_regression.best_score_ print("-----") # SVC parameters = { "C": [0.001, 0.01, 0.1, 1.0], "kernel": ["linear", "poly", "rbf", "sigmoid"], "gamma": ["scale", "auto"], } model_svc = SVC( random_state=SEED, class_weight="balanced", probability=True, ) model_svc = GridSearchCV( model_svc, parameters, cv=5, scoring="accuracy", ) model_svc.fit(X_train, y_train) print("-----") print(f"Best parameters {model_svc.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_svc.best_score_:.3f}" ) cross_valid_scores["svc"] = model_svc.best_score_ print("-----") # KNN parameters = { "weights": ["uniform", "distance"], } model_k_neighbors = KNeighborsClassifier() model_k_neighbors = GridSearchCV( model_k_neighbors, parameters, cv=5, scoring="accuracy", ) model_k_neighbors.fit(X_train, y_train) print("-----") print(f"Best parameters {model_k_neighbors.best_params_}") print( f"Mean cross-validated accuracy score of the best_estimator: " + f"{model_k_neighbors.best_score_:.3f}" ) cross_valid_scores["k_neighbors"] = model_k_neighbors.best_score_ print("-----")
false
0
8,079
25
8,079
8,079
129623888
<jupyter_start><jupyter_text>Data Science Salaries 2023 💸 Data Science Job Salaries Dataset contains 11 columns, each are: 1. work_year: The year the salary was paid. 2. experience_level: The experience level in the job during the year 3. employment_type: The type of employment for the role 4. job_title: The role worked in during the year. 5. salary: The total gross salary amount paid. 6. salary_currency: The currency of the salary paid as an ISO 4217 currency code. 7. salaryinusd: The salary in USD 8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code. 9. remote_ratio: The overall amount of work done remotely 10. company_location: The country of the employer's main office or contracting branch 11. company_size: The median number of people that worked for the company during the year Kaggle dataset identifier: data-science-salaries-2023 <jupyter_script># # Data Science Salaries EDA 2023 # ## Importing the libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # ## Loading the data data = pd.read_csv("/kaggle/input/data-science-salaries-2023/ds_salaries.csv") # ### Data cleaning and understanding the data data.shape data.head() data.info() data.isnull().sum() data.duplicated().sum() data.drop_duplicates(keep="first", inplace=True) data.duplicated().sum() data.describe() # # EDA and Visualization # ### Top 10 job Tiles in 2023 a = ( data[data["work_year"] == 2023]["job_title"] .value_counts() .nlargest(10) .reset_index() ) sns.barplot(data=a, x="job_title", y="index") # ### Top 10 average salaries by job title in 2023 a = ( data["salary_in_usd"] .groupby(data["job_title"]) .mean() .round(0) .nlargest(10) .sort_values(ascending=False) .reset_index() ) plt.figure(figsize=(12, 8)) ax = sns.barplot(data=a, y="job_title", x="salary_in_usd") ax.set( ylabel="Job title", xlabel="Salary in USD", title="Top 10 average salaries by job title", ) # ### Experience levels data["experience_level"].unique() data["experience_level"] = data["experience_level"].replace("SE", "Senior-level") data["experience_level"] = data["experience_level"].replace("MI", "Medium_level") data["experience_level"] = data["experience_level"].replace("EN", "Entry-level") data["experience_level"] = data["experience_level"].replace("EX", "Executive-level") cx = sns.countplot(data=data, x=data.experience_level) cx.set(xlabel="Experience Level", title="Experience Levels") # #### As you can see, the senior-level positions have the highest count, followed by mid-level and junior positions. There are fewer executive-level positions compared to other levels. # ### Employment type data["employment_type"].unique() data["employment_type"] = data["employment_type"].replace("FT", "Full_time") data["employment_type"] = data["employment_type"].replace("CT", "Contractual") data["employment_type"] = data["employment_type"].replace("PT", "Part_time") data["employment_type"] = data["employment_type"].replace("FL", "Freelance") bx = sns.countplot(data=data, x=data.employment_type, hue="experience_level") bx.set(ylabel="counts", title="Number of Employment types") # ### Salaries by employment types fig, ax = plt.subplots() fx = sns.barplot( ax=ax, data=data, x="employment_type", y="salary_in_usd", errorbar=None, hue="work_year", ) fx.bar_label(fx.containers[3]) # Here we can see average salaries for full time employees has increased over the years. 
# # Salaries by work experience sw = ( data["salary_in_usd"] .groupby(data["experience_level"]) .mean() .round() .sort_values(ascending=False) .reset_index() ) sx = sns.barplot(data=sw, x="experience_level", y="salary_in_usd") sx.set( xlabel="Experience Level", ylabel="Average Salary", title="Average salaries by Experience", ) # Here we can see executive level has the highest average salaries followed by senior level. # ## Salaries by work years sy = data["salary_in_usd"].groupby(data["work_year"]).mean() plt.title("Average Salary by work year") plt.xlabel("Work year") plt.ylabel("Avg Salary") sns.lineplot(x=["2020", "2021", "2022", "2023"], y=sy)
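# A small optional sketch: the line plot above hardcodes the year labels, so they
# can drift out of sync with the data. This version is driven directly by the
# groupby result (assumes the `data` frame loaded above).
yearly = data.groupby("work_year")["salary_in_usd"].mean().round(0).reset_index()
plt.figure(figsize=(8, 5))
ax = sns.lineplot(data=yearly, x="work_year", y="salary_in_usd", marker="o")
ax.set(
    xlabel="Work year",
    ylabel="Average salary (USD)",
    title="Average salary by work year",
)
ax.set_xticks(yearly["work_year"])
plt.show()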
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/623/129623888.ipynb
data-science-salaries-2023
arnabchaki
[{"Id": 129623888, "ScriptId": 38520604, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10336782, "CreationDate": "05/15/2023 10:02:03", "VersionNumber": 2.0, "Title": "Data_science_salaries_eda", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 39.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 185884599, "KernelVersionId": 129623888, "SourceDatasetVersionId": 5392837}]
[{"Id": 5392837, "DatasetId": 3125926, "DatasourceVersionId": 5466555, "CreatorUserId": 7428813, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "04/13/2023 09:55:16", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023 \ud83d\udcb8", "Slug": "data-science-salaries-2023", "Subtitle": "Salaries of Different Data Science Fields in the Data Science Domain", "Description": "Data Science Job Salaries Dataset contains 11 columns, each are:\n\n1. work_year: The year the salary was paid.\n2. experience_level: The experience level in the job during the year\n3. employment_type: The type of employment for the role\n4. job_title: The role worked in during the year.\n5. salary: The total gross salary amount paid.\n6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.\n7. salaryinusd: The salary in USD\n8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.\n9. remote_ratio: The overall amount of work done remotely\n10. company_location: The country of the employer's main office or contracting branch\n11. company_size: The median number of people that worked for the company during the year", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3125926, "CreatorUserId": 7428813, "OwnerUserId": 7428813.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5392837.0, "CurrentDatasourceVersionId": 5466555.0, "ForumId": 3189506, "Type": 2, "CreationDate": "04/13/2023 09:55:16", "LastActivityDate": "04/13/2023", "TotalViews": 234449, "TotalDownloads": 44330, "TotalVotes": 1244, "TotalKernels": 184}]
[{"Id": 7428813, "UserName": "arnabchaki", "DisplayName": "randomarnab", "RegisterDate": "05/16/2021", "PerformanceTier": 2}]
false
1
1,066
3
1,314
1,066
129623401
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import datetime import re import string # ### Load csv files import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # ### Read files train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv") submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") train test submission # ### Analyse train sns.displot(train["target"], kde=True) train["target"].value_counts() num_classes = train["target"].nunique() print(num_classes) # ### Split dataset X = train["text"] y = train["target"] X_test = test["text"] from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.1, random_state=42, shuffle=y ) X_train.shape, X_val.shape, y_train.shape, y_val.shape, X_test.shape # ### Tensorflow import tensorflow as tf print(tf.__version__) import tensorflow_hub as hub import tensorflow_text as text from tensorflow import keras from keras.callbacks import ModelCheckpoint, EarlyStopping bert_preprocess = hub.KerasLayer( "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3" ) bert_encoder = hub.KerasLayer( "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4" ) text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text") preprocessed_text = bert_preprocess(text_input) outputs = bert_encoder(preprocessed_text) l = tf.keras.layers.Dropout(0.1, name="dropout")(outputs["pooled_output"]) l = tf.keras.layers.Dense(1, activation="sigmoid", name="output")(l) model = tf.keras.Model(inputs=[text_input], outputs=[l]) model.summary() METRICS = [ tf.keras.metrics.BinaryAccuracy(name="accuracy"), tf.keras.metrics.Precision(name="precision"), tf.keras.metrics.Recall(name="recall"), ] model.compile(optimizer="adam", loss="binary_crossentropy", metrics=METRICS) keras.utils.plot_model(model, "sentiment_classifier.png") early_stopping = EarlyStopping(monitor="val_loss", mode="min", patience=25, verbose=1) mc = ModelCheckpoint( "best_model.tf", monitor="val_loss", mode="min", save_best_only=True ) model.fit( X_train, y_train, epochs=15, validation_data=(X_val, y_val), verbose=1, callbacks=[early_stopping, mc], ) results = model.evaluate(X_val, y_val, batch_size=128) model.save("tweet_model") predictions = model.predict(X_test) predictions = predictions.flatten() predictions cutoff = (predictions.max() - predictions.min()) / 2 predictions = np.where(predictions > 0.5, 1, 0) predictions sns.displot(predictions, kde=True) count_pred = np.unique(predictions, return_counts=True) count_pred # ### Submission submission["target"] = predictions submission.to_csv("submission.csv", index=False) # writing data to a CSV file submission = pd.read_csv("submission.csv") submission
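# The notebook computes a `cutoff` from the spread of the predicted scores but then
# thresholds at a fixed 0.5. A minimal sketch comparing the fixed threshold with a
# midpoint-of-range threshold (assumes the trained `model` and `X_test` above; the
# midpoint rule is just one illustrative alternative).
raw_scores = model.predict(X_test).flatten()
midpoint = (raw_scores.max() + raw_scores.min()) / 2
labels_fixed = np.where(raw_scores > 0.5, 1, 0)
labels_midpoint = np.where(raw_scores > midpoint, 1, 0)
print("midpoint threshold:", midpoint)
print("0.5 threshold class counts:", np.unique(labels_fixed, return_counts=True))
print("midpoint threshold class counts:", np.unique(labels_midpoint, return_counts=True))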
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/623/129623401.ipynb
null
null
[{"Id": 129623401, "ScriptId": 37489748, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13876321, "CreationDate": "05/15/2023 09:57:20", "VersionNumber": 1.0, "Title": "NLP with Disaster Tweets", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 110.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
null
null
null
null
false
0
967
5
967
967
129623632
# This is a simple xgboost model that requires little preprocessing and can serve as a baseline model for test submission to the competition.
import itertools

import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score, log_loss
from sklearn.model_selection import KFold, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# **Load and preprocess training data**
df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
df_train.head()
df_train.columns
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
df_train.Cabin.unique()

# Load the data
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")

# Extract features and target
X = train_data.drop(["Survived", "PassengerId", "Name", "Ticket", "Cabin"], axis=1)
y = train_data["Survived"]

# Define the categorical and numerical features
cat_features = ["Sex", "Embarked"]
num_features = ["Pclass", "Age", "SibSp", "Parch", "Fare"]

# Column transformer: impute and one-hot encode the categorical features, and
# impute the numerical features so both XGBoost and the random forest can train
preprocessor = ColumnTransformer(
    transformers=[
        (
            "cat",
            Pipeline(
                [
                    ("impute", SimpleImputer(strategy="most_frequent")),
                    ("onehot", OneHotEncoder(handle_unknown="ignore")),
                ]
            ),
            cat_features,
        ),
        ("num", SimpleImputer(strategy="median"), num_features),
    ],
    remainder="passthrough",
)

# Split the data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Grid search over max_depth and min_child_weight with 4-fold cross-validation
param_space = {"max_depth": [3, 5, 7], "min_child_weight": [1.0, 2.0, 4.0]}
param_combinations = itertools.product(
    param_space["max_depth"], param_space["min_child_weight"]
)
params = []
scores = []
for max_depth, min_child_weight in param_combinations:
    score_folds = []
    kf = KFold(n_splits=4, shuffle=True, random_state=42)
    for tr_idx, va_idx in kf.split(X_train):
        tr_x, va_x = X_train.iloc[tr_idx], X_train.iloc[va_idx]
        tr_y, va_y = y_train.iloc[tr_idx], y_train.iloc[va_idx]
        pipeline = Pipeline(
            [
                ("preprocessor", preprocessor),
                (
                    "model",
                    xgb.XGBClassifier(
                        n_estimators=100,
                        max_depth=max_depth,
                        min_child_weight=min_child_weight,
                        learning_rate=0.1,
                    ),
                ),
            ]
        )
        # Fit on the training fold, then score log loss on the validation fold
        pipeline.fit(tr_x, tr_y)
        va_pred = pipeline.predict_proba(va_x)[:, 1]
        logloss = log_loss(va_y, va_pred)
        score_folds.append(logloss)
    score_mean = np.mean(score_folds)
    params.append((max_depth, min_child_weight))
    scores.append(score_mean)

# Lower mean log loss is better
best_idx = np.argsort(scores)[0]
best_param = params[best_idx]
print(f"max_depth: {best_param[0]}, min_child_weight: {best_param[1]}")

# **Build and train model**
# Refit the pipeline with the best parameters and evaluate it on the validation set
pipeline = Pipeline(
    [
        ("preprocessor", preprocessor),
        (
            "model",
            xgb.XGBClassifier(
                n_estimators=100,
                max_depth=best_param[0],
                min_child_weight=best_param[1],
                learning_rate=0.1,
            ),
        ),
    ]
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
accuracy = accuracy_score(y_val, y_pred)
print("Validation accuracy:", accuracy)

# Random forest trained on the same preprocessed features
rf_pipeline = Pipeline(
    [("preprocessor", preprocessor), ("model", RandomForestClassifier())]
)
rf_pipeline.fit(X_train, y_train)
pred_rf = rf_pipeline.predict(X_val)

# **Predict on test dataset**
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
df_test.head()
X_test = df_test.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)

# **Prepare submission file**
df_sub = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
df_sub.head()
pred = pipeline.predict(X_test) * 0.8 + rf_pipeline.predict(X_test) * 0.2
pred_label = np.where(pred > 0.5, 1, 0)
pred_label[:5]
df_sub["Survived"] = pred_label
df_sub.Survived.describe()
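# The 0.8/0.2 blend above mixes hard 0/1 labels, so with a 0.5 cutoff the random
# forest can never overturn the XGBoost prediction. A minimal sketch of a soft
# blend on predicted probabilities instead (assumes the fitted `pipeline`,
# `rf_pipeline`, `X_test` and `df_sub` objects above; the 0.8/0.2 weights are the
# notebook's own choice, kept here for illustration).
proba_xgb = pipeline.predict_proba(X_test)[:, 1]
proba_rf = rf_pipeline.predict_proba(X_test)[:, 1]
blended = 0.8 * proba_xgb + 0.2 * proba_rf
df_sub["Survived"] = np.where(blended > 0.5, 1, 0)
df_sub.head()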
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/623/129623632.ipynb
null
null
[{"Id": 129623632, "ScriptId": 38512146, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11474584, "CreationDate": "05/15/2023 09:59:28", "VersionNumber": 1.0, "Title": "titanic_xgboost_rf_ensemble", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 124.0, "LinesInsertedFromPrevious": 124.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,284
0
1,284
1,284
129623787
<jupyter_start><jupyter_text>Bank Customer Churn RowNumber—corresponds to the record (row) number and has no effect on the output. CustomerId—contains random values and has no effect on customer leaving the bank. Surname—the surname of a customer has no impact on their decision to leave the bank. CreditScore—can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank. Geography—a customer’s location can affect their decision to leave the bank. Gender—it’s interesting to explore whether gender plays a role in a customer leaving the bank. Age—this is certainly relevant, since older customers are less likely to leave their bank than younger ones. Tenure—refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank. Balance—also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances. NumOfProducts—refers to the number of products that a customer has purchased through the bank. HasCrCard—denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank. IsActiveMember—active customers are less likely to leave the bank. EstimatedSalary—as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries. Exited—whether or not the customer left the bank. Complain—customer has complaint or not. Satisfaction Score—Score provided by the customer for their complaint resolution. Card Type—type of card hold by the customer. Points Earned—the points earned by the customer for using credit card. Acknowledgements As we know, it is much more expensive to sign in a new client than keeping an existing one. It is advantageous for banks to know what leads a client towards the decision to leave the company. Churn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible. Kaggle dataset identifier: bank-customer-churn <jupyter_script># ![Bank churn.png](attachment:839d72dd-c9c5-4f22-94aa-9db08733c9aa.png) # # Bank Customer Churn Analysis # This notebook explores the "Bank Customer Churn" dataset to analyze customer churn and derive insights to help the bank improve customer retention strategies. Customer churn refers to the phenomenon where customers discontinue their relationship with a company or stop using its products or services. Understanding the factors that contribute to churn is crucial for businesses to retain customers and maintain long-term profitability. # The dataset used in this analysis contains information about bank customers, including their demographic details, credit score, account balance, tenure, and various other features. The target variable is "Exited," which indicates whether a customer has churned (1) or not (0). By exploring the data and performing data analysis, feature engineering, and predictive modeling, we aim to identify patterns and factors associated with customer churn. # **The notebook covers the following key steps:** # * **Data Loading and Exploration:** Load the dataset and perform initial exploration to understand its structure, feature types, and any missing values. 
# * **Exploratory Data Analysis (EDA):** Conduct an in-depth analysis of the dataset, including visualizations and statistical summaries, to gain insights into the distribution and relationships between variables. # * **Feature Engineering:** Transform and engineer new features based on the existing variables to capture additional information and improve the predictive power of the model. # * **Feature Selection:** Select the most relevant features that have a significant impact on customer churn prediction. # * **Predictive Modeling:** Build and evaluate predictive models using machine learning algorithms to predict customer churn and measure their performance. # * **Model Interpretation and Insights:** Analyze the model results, feature importances, and coefficients to extract meaningful insights and actionable recommendations to reduce churn and improve customer retention. # Throughout the notebook, code snippets, visualizations, and explanations will be provided to guide the analysis process. Let's get started with loading and exploring the dataset! # Feel free to customize the introduction based on the specific analysis and insights you plan to cover in your notebook. # Import necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.feature_selection import SelectKBest, f_classif from sklearn.ensemble import RandomForestClassifier # Load the dataset data = pd.read_csv("/kaggle/input/bank-customer-churn/Customer-Churn-Records.csv") # Display the first few rows of the dataset print(data.head()) # Check the dimensions of the dataset print("Number of rows:", data.shape[0]) print("Number of columns:", data.shape[1]) print(data.columns) # Check the data types of each column print(data.dtypes) # Check for missing values print(data.isnull().sum()) # Summary statistics print(data.describe()) # # Data visualization # Churn distribution plt.figure(figsize=(6, 6)) churn_counts = data["Exited"].value_counts() plt.pie(churn_counts, labels=["Retained", "Churned"], autopct="%1.1f%%", startangle=90) plt.axis("equal") plt.title("Churn Distribution") plt.show() # Churn by gender plt.figure(figsize=(8, 6)) sns.countplot(x="Gender", hue="Exited", data=data) plt.title("Churn by Gender") plt.show() # Churn by geography plt.figure(figsize=(8, 6)) sns.countplot(x="Geography", hue="Exited", data=data) plt.title("Churn by Geography") plt.show() # Churn by age group data["AgeGroup"] = pd.cut( data["Age"], bins=[0, 30, 40, 50, 60, np.inf], labels=["<30", "30-40", "40-50", "50-60", "60+"], ) plt.figure(figsize=(8, 6)) sns.countplot(x="AgeGroup", hue="Exited", data=data) plt.title("Churn by Age Group") plt.show() # # Insights # Churn rate by gender churn_rate_gender = data.groupby("Gender")["Exited"].mean() print(churn_rate_gender) # Based on the provided dataset, the churn rate by gender is as follows: # * **Female: 25.1%** # * **Male: 16.5%** # This means that the churn rate for female customers is higher (25.1%) compared to male customers (16.5%). # Churn rate by geography churn_rate_geography = data.groupby("Geography")["Exited"].mean() print(churn_rate_geography) # Based on the provided dataset, the churn rate by geography is as follows: # * **France: 16.2%** # * **Germany: 32.4%** # * **Spain: 16.7%** # This indicates that the churn rate is highest in Germany (32.4%), followed by Spain (16.7%) and France (16.2%). 
# Churn rate by age group churn_rate_age = data.groupby("AgeGroup")["Exited"].mean() print(churn_rate_age) # # Based on the provided dataset, the churn rate by age group is as follows: # * **<30: 7.5%** # * **30-40: 12.1%** # * **40-50: 33.9%** # * **50-60: 56.2%** # * **60+: 24.8%** # These churn rates indicate that the highest churn rate is observed in the 50-60 age group (56.2%), followed by the 40-50 age group (33.9%). The lowest churn rate is observed in the <30 age group (7.5%). # Churn by Number of Products: plt.figure(figsize=(8, 6)) sns.countplot(x="NumOfProducts", hue="Exited", data=data) plt.title("Churn by Number of Products") plt.show() # Churn by Credit Card plt.figure(figsize=(8, 6)) sns.countplot(x="HasCrCard", hue="Exited", data=data) plt.title("Churn by Credit Card") plt.show() # Churn by Activity Status plt.figure(figsize=(8, 6)) sns.countplot(x="IsActiveMember", hue="Exited", data=data) plt.title("Churn by Activity Status") plt.show() # Churn by Balance: plt.figure(figsize=(10, 6)) sns.boxplot(x="Exited", y="Balance", data=data) plt.title("Churn by Balance") plt.show()
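# A minimal sketch of the predictive-modeling step outlined in the introduction,
# using the RandomForestClassifier already imported above. Identifier columns and
# the derived AgeGroup are dropped and the remaining categorical columns are
# one-hot encoded; column names follow the dataset description, so adjust them if
# the CSV differs.
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

X = data.drop(columns=["Exited", "RowNumber", "CustomerId", "Surname", "AgeGroup"])
X = pd.get_dummies(X, drop_first=True)
y = data["Exited"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)
rf = RandomForestClassifier(n_estimators=200, random_state=42)
rf.fit(X_train, y_train)
print(classification_report(y_test, rf.predict(X_test)))
# Feature importances give a first view of which variables drive churn
importances = pd.Series(rf.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False).head(10))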
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/623/129623787.ipynb
bank-customer-churn
radheshyamkollipara
[{"Id": 129623787, "ScriptId": 38545081, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11723377, "CreationDate": "05/15/2023 10:01:04", "VersionNumber": 1.0, "Title": "Bank Customer Churn - EDA Insights", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 183.0, "LinesInsertedFromPrevious": 183.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 19}]
[{"Id": 185884285, "KernelVersionId": 129623787, "SourceDatasetVersionId": 5550559}]
[{"Id": 5550559, "DatasetId": 3197960, "DatasourceVersionId": 5625285, "CreatorUserId": 14862076, "LicenseName": "Other (specified in description)", "CreationDate": "04/28/2023 16:32:01", "VersionNumber": 1.0, "Title": "Bank Customer Churn", "Slug": "bank-customer-churn", "Subtitle": "Bank Customer Data for Customer Churn", "Description": "RowNumber\u2014corresponds to the record (row) number and has no effect on the output.\nCustomerId\u2014contains random values and has no effect on customer leaving the bank.\nSurname\u2014the surname of a customer has no impact on their decision to leave the bank.\nCreditScore\u2014can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank.\nGeography\u2014a customer\u2019s location can affect their decision to leave the bank.\nGender\u2014it\u2019s interesting to explore whether gender plays a role in a customer leaving the bank.\nAge\u2014this is certainly relevant, since older customers are less likely to leave their bank than younger ones.\nTenure\u2014refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank.\nBalance\u2014also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances.\nNumOfProducts\u2014refers to the number of products that a customer has purchased through the bank.\nHasCrCard\u2014denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank.\nIsActiveMember\u2014active customers are less likely to leave the bank.\nEstimatedSalary\u2014as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries.\nExited\u2014whether or not the customer left the bank.\nComplain\u2014customer has complaint or not.\nSatisfaction Score\u2014Score provided by the customer for their complaint resolution.\nCard Type\u2014type of card hold by the customer.\nPoints Earned\u2014the points earned by the customer for using credit card.\n\nAcknowledgements\n\nAs we know, it is much more expensive to sign in a new client than keeping an existing one.\n\nIt is advantageous for banks to know what leads a client towards the decision to leave the company.\n\nChurn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3197960, "CreatorUserId": 14862076, "OwnerUserId": 14862076.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5550559.0, "CurrentDatasourceVersionId": 5625285.0, "ForumId": 3262570, "Type": 2, "CreationDate": "04/28/2023 16:32:01", "LastActivityDate": "04/28/2023", "TotalViews": 39315, "TotalDownloads": 6814, "TotalVotes": 97, "TotalKernels": 52}]
[{"Id": 14862076, "UserName": "radheshyamkollipara", "DisplayName": "Radheshyam Kollipara", "RegisterDate": "04/28/2023", "PerformanceTier": 0}]
false
1
1,730
19
2,230
1,730
129264144
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/vimtotweets-without-duplication/VimtoTweets_PreProcessedData.csv" ) print(len(df)) pd.set_option("display.max_colwidth", None) pd.set_option("display.max_rows", None) df.head(10) df.describe() # Importing needed libraries from collections import OrderedDict import pyarabic.araby as araby # Python Library for Arabic import itertools # Functions creating iterators for efficient looping import re # Regular Expressions import sys # System-specific parameters and functions # Remove Diacritics df["Tweets"] = df["Tweets"].apply(araby.strip_tashkeel) # Remove repeated letters def remove_repeated(Text): return "".join(c for c, _ in itertools.groupby(Text)) df["Tweets"] = df["Tweets"].apply(remove_repeated) # Normalize arabic letters (unify shapes) def unify_shapes(Text): Text = re.sub("ة", "ه", Text) Text = re.sub("گ", "ك", Text) Text = araby.normalize_alef(Text) Text = araby.normalize_hamza(Text) Text = araby.normalize_ligature(Text) return Text df["Tweets"] = df["Tweets"].apply(unify_shapes) # df = df[df['Tweets'].str.contains("صحون")] # pd.set_option('display.max_colwidth', None) # df.head(10) # Dictionaty based normalization (unify most appeared words shapes) def unify_words(Text): Text = re.sub("سنبوسه", "سمبوسه", Text) Text = re.sub("طماط", "طماطم", Text) Text = re.sub("بطاطا", "بطاطس", Text) Text = re.sub("مويا", "ماء", Text) Text = re.sub("مويه", "ماء", Text) Text = re.sub("شوربا", "شوربه", Text) Text = re.sub("شربه", "شوربه", Text) Text = re.sub("كاسه", "كوب", Text) Text = re.sub("كاسات", "اكواب", Text) Text = re.sub("صحن", "طبق", Text) Text = re.sub("صحون", "اطباق", Text) return Text df["Tweets"] = df["Tweets"].apply(unify_words) pd.set_option("display.max_colwidth", None) df.head(10) df.to_csv("data_with_normalization.csv", index=False)
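# A small sketch that folds the steps above into a single reusable helper, so the
# same normalization can be applied to any new text column with one call (assumes
# the functions and imports defined above).
def normalize_tweet(text):
    text = araby.strip_tashkeel(text)  # remove diacritics
    text = remove_repeated(text)  # collapse consecutive duplicate letters
    text = unify_shapes(text)  # unify letter shapes
    text = unify_words(text)  # dictionary-based word unification
    return text

# Equivalent single pass over the Tweets column
df["Tweets"] = df["Tweets"].apply(normalize_tweet)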
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264144.ipynb
null
null
[{"Id": 129264144, "ScriptId": 37935931, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13877926, "CreationDate": "05/12/2023 09:30:23", "VersionNumber": 1.0, "Title": "Cloud_Normalization", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 80.0, "LinesInsertedFromPrevious": 80.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
881
0
881
881
129264082
<jupyter_start><jupyter_text>T2Idataset
Kaggle dataset identifier: t2idataset
<jupyter_script>
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
#     for filename in filenames:
#         print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import cv2
import numpy as np
import random
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def load_data(
    data_dir,
    train_txt,
    test_txt,
    img_size=128,
    grayscale=True,
    normalize=True,
    remove_invalid=True,
    augment=False,
):
    train_images = []
    train_labels = []
    train_class = []
    test_images = []
    test_labels = []
    test_class = []
    # Read train and test txt files
    with open(train_txt, "r") as f:
        train_files = f.read().splitlines()
    with open(test_txt, "r") as f:
        test_files = f.read().splitlines()
    # Load train images and labels
    for file in train_files:
        img_path, labels_str = file.split(" ")
        labels = np.array([int(label) for label in labels_str.split(",")])
        img = cv2.imread(os.path.join(data_dir, img_path))
        if img is not None:
            if grayscale:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if img_size:
                img = cv2.resize(img, (img_size, img_size))
            if normalize:
                img = img / 255.0
            train_images.append(img)
            train_labels.append(img_path.split("/")[1])
            train_class.extend(labels)
            # train_class.append(np.array([int(label) for label in labels]))
    # Load test images and labels
    for file in test_files:
        img_path, labels_str = file.split(" ")
        labels = np.array([int(label) for label in labels_str.split(",")])
        img = cv2.imread(os.path.join(data_dir, img_path))
        if img is not None:
            if grayscale:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if img_size:
                img = cv2.resize(img, (img_size, img_size))
            if normalize:
                img = img / 255.0
            test_images.append(img)
            test_labels.append(img_path.split("/")[1])
            test_class.extend(labels)
            # test_class.append(np.array([int(label) for label in labels]))
    # Remove images with invalid labels
    if remove_invalid:
        train_images_new = []
        train_labels_new = []
        train_class_new = []
        test_images_new = []
        test_labels_new = []
        test_class_new = []
        for i in range(len(train_images)):
            if len(train_labels[i]) > 0:
                train_images_new.append(train_images[i])
                train_labels_new.append(train_labels[i])
                train_class_new.append(train_class[i])
        for i in range(len(test_images)):
            if len(test_labels[i]) > 0:
                test_images_new.append(test_images[i])
                test_labels_new.append(test_labels[i])
                test_class_new.append(test_class[i])
        train_images = train_images_new
        train_labels = train_labels_new
        train_class = train_class_new
        test_images = test_images_new
        test_labels = test_labels_new
        test_class = test_class_new
    # Data augmentation
    if augment:
        datagen = ImageDataGenerator(
            rotation_range=10,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.1,
            zoom_range=0.1,
            horizontal_flip=True,
            vertical_flip=True,
            fill_mode="nearest",
        )
        datagen.fit(train_images)
        for x_batch, y_batch in datagen.flow(
            train_images, train_labels, batch_size=len(train_images)
        ):
            train_images = np.append(train_images, x_batch, axis=0)
            train_labels = np.append(train_labels, y_batch, axis=0)
            break
    # Convert lists to numpy arrays
    train_images = np.array(train_images)
    train_labels = np.array(train_labels)
    test_images = np.array(test_images)
    test_labels = np.array(test_labels)
    train_class = np.array(train_class)
    test_class = np.array(test_class)
    return train_images, train_labels, test_images, test_labels, train_class, test_class


data_dir = "/kaggle/input/t2idataset/Images"
train_txt = "/kaggle/input/t2ilabels/data/label/train.txt"
test_txt = "/kaggle/input/t2ilabels/data/label/test.txt"
(
    train_images,
    train_labels,
    test_images,
    test_labels,
    train_class,
    test_class,
) = load_data(data_dir, train_txt, test_txt)
# print(len(train_images))
# print(len(train_labels))
# print((test_images))
print((test_labels))
# print(len(train_class))
print((test_class))
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
print(train_class.shape)
print(test_class.shape)
print(len(np.unique(train_labels)))

import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras import layers
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
    Input,
    Dense,
    Embedding,
    Flatten,
    Reshape,
    Concatenate,
    BatchNormalization,
)
from tensorflow.keras.layers import (
    UpSampling2D,
    Conv2D,
    Activation,
    ZeroPadding2D,
    Conv2DTranspose,
    LeakyReLU,
    Dropout,
)
from tensorflow.keras.optimizers import Adam

# Load data
data_dir = "/kaggle/input/t2idataset/Images"
train_txt = "/kaggle/input/t2ilabels/data/label/train.txt"
test_txt = "/kaggle/input/t2ilabels/data/label/test.txt"
# train_images, train_labels, test_images, test_labels, train_class, test_class = load_data(data_dir, train_txt, test_txt)


# Define the generator model
def build_generator(latent_dim, num_classes):
    # Concatenate the noise vector and class label as input
    latent_input = layers.Input(shape=(latent_dim,))
    class_label = layers.Input(shape=(1,), dtype="int32")
    emb = layers.Embedding(num_classes, latent_dim)(class_label)
    merged_input = layers.multiply([latent_input, emb])
    # Generate the image
    x = layers.Dense(7 * 7 * 128, use_bias=False)(merged_input)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Reshape((7, 7, 128))(x)
    x = layers.Conv2DTranspose(
        64, (5, 5), strides=(2, 2), padding="same", use_bias=False
    )(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    generated_image = layers.Conv2DTranspose(
        1, (5, 5), strides=(2, 2), padding="same", activation="tanh"
    )(x)
    # Define the generator model
    generator_model = tf.keras.Model(
        inputs=[latent_input, class_label], outputs=generated_image
    )
    return generator_model


# Define the discriminator model
def build_discriminator():
    # Image input
    image_input = layers.Input(shape=(128, 128, 1))
    # Discriminator layers
    x = layers.Conv2D(64, (5, 5), strides=(2, 2), padding="same")(image_input)
    x = layers.LeakyReLU()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same")(x)
    x = layers.LeakyReLU()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(1)(x)
    # Define the discriminator model
    discriminator_model = tf.keras.Model(inputs=image_input, outputs=x)
    return discriminator_model


# Define the CGAN model
class CGAN(tf.keras.Model):
    def __init__(self, generator, discriminator):
        super(CGAN, self).__init__()
        self.generator = generator
        self.discriminator = discriminator

    def compile(self, gen_optimizer, disc_optimizer, loss_fn):
        super(CGAN, self).compile()
        self.gen_optimizer = gen_optimizer
        self.disc_optimizer = disc_optimizer
        self.loss_fn = loss_fn

    def train_step(self, data):
        real_images, real_labels, real_class_labels = data  # Add real_class_labels
        # Generate random noise
        batch_size = tf.shape(real_images)[0]
        latent_dim = self.generator.input_shape[0][1]
        random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
        # Generate fake images
        generated_images = self.generator(
            [random_latent_vectors, real_class_labels]
        )  # Pass real_class_labels
        generated_images = tf.image.resize(generated_images, (128, 128))
        # Concatenate real and fake images
        combined_images = tf.concat([generated_images, real_images], axis=0)
        # Create labels for the discriminator
        real_labels = tf.ones((batch_size, 1))
        fake_labels = tf.zeros((batch_size, 1))
        combined_labels = tf.concat([fake_labels, real_labels], axis=0)
        # Train the discriminator
        with tf.GradientTape() as disc_tape:
            disc_predictions = self.discriminator(combined_images)
            disc_loss = self.loss_fn(combined_labels, disc_predictions)
        gradients_of_discriminator = disc_tape.gradient(
            disc_loss, self.discriminator.trainable_variables
        )
        self.disc_optimizer.apply_gradients(
            zip(gradients_of_discriminator, self.discriminator.trainable_variables)
        )
        # Train the generator
        with tf.GradientTape() as gen_tape:
            gen_predictions = self.discriminator(
                [random_latent_vectors, real_class_labels]
            )  # Pass real_class_labels
            gen_loss = self.loss_fn(real_labels, gen_predictions)
        gradients_of_generator = gen_tape.gradient(
            gen_loss, self.generator.trainable_variables
        )
        self.gen_optimizer.apply_gradients(
            zip(gradients_of_generator, self.generator.trainable_variables)
        )
        return {"gen_loss": gen_loss, "disc_loss": disc_loss}


# Create an instance of the CGAN model
latent_dim = 100  # Latent space dimension
num_classes = 581  # Number of classes
generator = build_generator(latent_dim, num_classes)
discriminator = build_discriminator()
cgan = CGAN(generator, discriminator)
# Compile the CGAN model
gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# Create a dictionary mapping class names to integer labels
label_map = {class_name: i for i, class_name in enumerate(np.unique(train_class))}
default_label = len(label_map)  # Assign a default label for unknown classes
# Convert string labels to integer labels
train_labels = np.array(
    [label_map.get(class_name, default_label) for class_name in train_class]
)
test_labels = np.array(
    [label_map.get(class_name, default_label) for class_name in test_class]
)
# Convert the labels to categorical format
# num_classes = len(np.unique(train_class))
train_labels = tf.keras.utils.to_categorical(train_labels, num_classes)
test_labels = tf.keras.utils.to_categorical(test_labels, num_classes)
# Train the CGAN model
cgan.compile(gen_optimizer, disc_optimizer, loss_fn)
# Create TensorFlow Dataset objects
train_dataset = tf.data.Dataset.from_tensor_slices(
    (train_images, train_labels, train_class)
)
train_dataset = train_dataset.shuffle(buffer_size=10000).batch(batch_size)
# Train the CGAN model
cgan.fit(train_dataset, epochs=epochs)
# Generate random noise
random_latent_vectors = tf.random.normal(shape=(num_samples, latent_dim))
# Generate random class labels
random_class_labels = tf.random.uniform(
    shape=(num_samples, 1), minval=0, maxval=num_classes, dtype=tf.int32
)
# Generate samples
generated_samples = generator.predict([random_latent_vectors, random_class_labels])
# Display the generated samples
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, num_samples, figsize=(10, 2))
for i in range(num_samples):
    axes[i].imshow(generated_samples[i, :, :, 0], cmap="gray")
    axes[i].axis("off")
plt.show()


def build_generator(latent_dim, num_classes):
    input_noise = layers.Input(shape=(latent_dim,))
    input_label = layers.Input(shape=(num_classes,))
    label_embeddings = layers.Dense(32)(input_label)
    label_embeddings = layers.Dense(128 * 128, activation="relu")(label_embeddings)
    label_embeddings = layers.Reshape((128, 128, 1))(label_embeddings)
    x = layers.Embedding(num_classes, 128 * 128)(input_label)
    x = layers.Reshape((128, 128, 1))(x)
    x = layers.Conv2DTranspose(
        64, kernel_size=4, strides=2, padding="same", activation="relu"
    )(x)
    x = layers.Conv2DTranspose(
        32, kernel_size=4, strides=2, padding="same", activation="relu"
    )(x)
    combined = layers.Concatenate()([x, label_embeddings])
    x = layers.Conv2DTranspose(
        16, kernel_size=4, strides=2, padding="same", activation="relu"
    )(combined)
    generated_image = layers.Conv2D(
        1, kernel_size=3, padding="same", activation="tanh"
    )(x)
    generator = tf.keras.Model(
        inputs=[input_noise, input_label], outputs=generated_image
    )
    return generator


def build_discriminator(input_shape, num_classes):
    input_image = layers.Input(shape=input_shape)
    input_label = layers.Input(shape=(num_classes,))
    x = layers.Conv2D(16, kernel_size=3, strides=2, padding="same", activation="relu")(
        input_image
    )
    x = layers.Conv2D(32, kernel_size=3, strides=2, padding="same", activation="relu")(
        x
    )
    x = layers.Flatten()(x)
    x = layers.Dense(64, activation="relu")(x)
    label_embedding = layers.Dense(64)(input_label)
    label_embedding = layers.Dense(64)(label_embedding)
    label_embedding = layers.Reshape((1, 1, 64))(label_embedding)
    label_embedding = tf.tile(label_embedding, [1, 32, 32, 1])
    combined = layers.Concatenate()([x, label_embedding])
    x = layers.Dense(64, activation="relu")(combined)
    validity = layers.Dense(1)(x)
    discriminator = tf.keras.Model(inputs=[input_image, input_label], outputs=validity)
    return discriminator


# Define the generator model
def build_generator(latent_dim, num_classes):
    input_noise = layers.Input(shape=(latent_dim,))
    input_label = layers.Input(shape=(num_classes,))
    # Reshape label embeddings to (batch_size, 1, 1, num_classes)
    label_embeddings = layers.Reshape((1, 1, num_classes))(input_label)
    label_embeddings = layers.BatchNormalization()(label_embeddings)
    # Project noise input to 4x4x256
    x = layers.Dense(4 * 4 * 256, activation="relu")(input_noise)
    x = layers.Reshape((4, 4, 256))(x)
    x = layers.BatchNormalization()(x)
    # Concatenate noise and label embeddings
    x = layers.Concatenate()([x, label_embeddings])
    # Upsample to 8x8x256
    x = layers.Conv2DTranspose(
        256, kernel_size=4, strides=2, padding="same", activation="relu"
    )(x)
    x = layers.BatchNormalization()(x)
    # Upsample to 16x16x128
    x = layers.Conv2DTranspose(
        128, kernel_size=4, strides=2, padding="same", activation="relu"
    )(x)
    x = layers.BatchNormalization()(x)
    # Upsample to 32x32x64
    x = layers.Conv2DTranspose(
        64, kernel_size=4, strides=2, padding="same", activation="relu"
    )(x)
    x = layers.BatchNormalization()(x)
    # Output layer
    output = layers.Conv2DTranspose(
        3, kernel_size=4, strides=2, padding="same", activation="tanh"
    )(x)
    # Define the model
    model = keras.models.Model(
        inputs=[input_noise, input_label], outputs=output, name="generator"
    )
    return model


# Define the discriminator model
def build_discriminator(input_shape, num_classes):
    input_image = layers.Input(shape=input_shape)
    # Downsample input image to 64x64x64
    x = layers.Conv2D(64, kernel_size=4, strides=2, padding="same", activation="relu")(
        input_image
    )
    x = layers.LeakyReLU(alpha=0.2)(x)
    # Downsample to 32x32x128
    x = layers.Conv2D(128, kernel_size=4, strides=2, padding="same", activation="relu")(
        x
    )
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # Downsample to 16x16x256
    x = layers.Conv2D(256, kernel_size=4, strides=2, padding="same", activation="relu")(
        x
    )
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # Downsample to 8x8x512
    x = layers.Conv2D(512, kernel_size=4, strides=2, padding="same", activation="relu")(
        x
    )
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # Flatten the feature maps
    x = layers.Flatten()(x)
    # Output layer for classification
    output_validity = layers.Dense(1, activation="sigmoid")(x)
    # Output layer for label prediction
    output_label = layers.Dense(num_classes, activation="softmax")(x)
    # Define the model
    model = keras.models.Model(
        inputs=input_image,
        outputs=[output_validity, output_label],
        name="discriminator",
    )
    return model


# Build the CGAN model
def build_cgan(generator, discriminator):
    # Generator input layers
    noise = generator.input[0]
    label = generator.input[1]
    # Generator output
    generated_image = generator([noise, label])
    # Freeze the discriminator during generator training
    discriminator.trainable = False
    # Discriminator input layers
    real_image = layers.Input(shape=input_shape)
    label = layers.Input(shape=(num_classes,))
    # Discriminator outputs for real and generated images
    real_output = discriminator(real_image)
    generated_output = discriminator(generated_image)
    # Define the CGAN model
    cgan_model = tf.keras.Model(
        inputs=[noise, label, real_image], outputs=[generated_output, real_output]
    )
    return cgan_model


import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras import layers
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
    Input,
    Dense,
    Embedding,
    Flatten,
    Reshape,
    Concatenate,
    BatchNormalization,
)
from tensorflow.keras.layers import (
    UpSampling2D,
    Conv2D,
    Activation,
    ZeroPadding2D,
    Conv2DTranspose,
    LeakyReLU,
    Dropout,
)
from tensorflow.keras.optimizers import Adam

# Load data
data_dir = "/kaggle/input/t2idataset/Images"
train_txt = "/kaggle/input/t2ilabels/data/label/train.txt"
test_txt = "/kaggle/input/t2ilabels/data/label/test.txt"
# train_images, train_labels, test_images, test_labels, train_class, test_class = load_data(data_dir, train_txt, test_txt)


def build_generator(latent_dim, num_classes):
    # Input layers
    noise = layers.Input(shape=(latent_dim,))
    label = layers.Input(shape=(1,))
    # Embed the label
    label_emb = layers.Embedding(num_classes, latent_dim)(label)
    label_emb = layers.Flatten()(label_emb)
    # Concatenate noise and label as input
    input_concat = layers.Concatenate()([noise, label_emb])
    # Dense layer
    model = layers.Dense(128 * 128)(input_concat)
    model = layers.LeakyReLU(alpha=0.2)(model)
    model = layers.Reshape((128, 128, 1))(model)
    # Output layer
    generated_image = layers.Conv2D(
        3, kernel_size=1, padding="same", activation="tanh"
    )(model)
    # Define the generator model
    generator_model = tf.keras.Model(inputs=[noise, label], outputs=generated_image)
    return generator_model


# Define the discriminator model
def build_discriminator(input_shape, num_classes):
    # Input layers
    image = layers.Input(shape=input_shape)
    label = layers.Input(shape=(581,))
    # Embed the label
    label_emb = layers.Embedding(num_classes, np.prod(input_shape))(label)
    label_emb = layers.Reshape(input_shape)(label_emb)
    # label_emb = layers.Flatten()(label_emb)
    # Concatenate image and label
    combined_input = layers.Concatenate()([image, label_emb])
    # Convolutional layers
    model = layers.Conv2D(128, kernel_size=4, strides=2, padding="same")(image)
    model = layers.LeakyReLU(0.2)(model)
    model = layers.Conv2D(256, kernel_size=4, strides=2, padding="same")(model)
    model = layers.LeakyReLU(0.2)(model)
    model = layers.BatchNormalization()(model)
    model = layers.Conv2D(512, kernel_size=4, strides=2, padding="same")(model)
    model = layers.LeakyReLU(0.2)(model)
    model = layers.BatchNormalization()(model)
    # Flatten layer
    model = layers.Flatten()(model)
    # Output layer
    validity = layers.Dense(1, activation="sigmoid")(model)
    # Define the discriminator model
    discriminator_model = tf.keras.Model(inputs=[image, label], outputs=validity)
    return discriminator_model


# Build the CGAN model
def build_cgan(generator, discriminator):
    # Set the discriminator weights as non-trainable
    discriminator.trainable = False
    # Input layers
    noise = generator.input[0]
    label = generator.input[1]
    # Generate image
    generated_image = generator([noise, label])
    # Get validity score
    validity = discriminator([generated_image, label])
    # Define the CGAN model
    cgan_model = tf.keras.Model(inputs=[noise, label], outputs=validity)
    return cgan_model


# Hyperparameters
latent_dim = 100
input_shape = (128, 128, 3)
batch_size = 64
epochs = 50
# Encode the train_class labels
label_encoder = LabelEncoder()
train_class_encoded = label_encoder.fit_transform(train_class)
# Convert string labels to numerical labels
label_mapping = {label: index for index, label in enumerate(np.unique(train_labels))}
train_labels_numerical = np.array([label_mapping[label] for label in train_labels])
# Get the actual number of unique classes
num_classes = len(label_mapping)  # num class: 581
# Convert train_labels_numerical to one-hot encoding
train_labels_encoded = tf.keras.utils.to_categorical(
    train_labels_numerical, num_classes
)
# Normalize train_images to the range [-1, 1]
train_images = (train_images - 0.5) * 2.0
generator = build_generator(latent_dim, num_classes)
discriminator = build_discriminator(input_shape, num_classes)
discriminator.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
    metrics=["accuracy"],
)
# Build and compile the GAN model
gan = build_cgan(generator, discriminator)
gan.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
    loss="binary_crossentropy",
)


# Train the GAN model
def train_gan(
    train_images,
    train_labels_encoded,
    train_class_encoded,
    generator,
    discriminator,
    gan,
    latent_dim,
    num_classes,
    batch_size,
    epochs,
):
    num_batches = train_images.shape[0] // batch_size
    # Training loop
    for epoch in range(epochs):
        for batch in range(num_batches):
            # ---------------------
            #  Train the discriminator
            # ---------------------
            # Select a random batch of images and labels
            idx = np.random.randint(0, train_images.shape[0], batch_size)
            real_images = train_images[idx]
            real_labels = train_labels_encoded[idx]
            # Generate a batch of fake images and labels
            noise = np.random.normal(0, 1, (batch_size, latent_dim))
            fake_labels = np.random.randint(0, num_classes, (batch_size, 1))
            # fake_labels = np.squeeze(fake_labels, axis=1)  # Reshape the fake_labels tensor
            fake_images = generator.predict([noise, fake_labels])
            # Increase dimensions of real_images
            real_images_expanded = np.expand_dims(real_images, axis=-1)
            real_images_expanded = np.repeat(real_images_expanded, 3, axis=-1)
            # Concatenate real and fake images and labels
            print(real_labels.shape)
            print(fake_labels.shape)
            print(real_images_expanded.shape)
            print(fake_images.shape)
            # all_images = np.concatenate((real_images_expanded, fake_images), axis=0)
            # all_labels = np.concatenate((real_labels, fake_labels), axis=0)
            # Label smoothing: assign 0.9 for real images and 0.0 for fake images
            real_labels_smoothed = real_labels * 0.9
            # Train the discriminator
            discriminator_loss_real = discriminator.train_on_batch(
                real_images, real_labels_smoothed
            )
            discriminator_loss_fake = discriminator.train_on_batch(
                fake_images, np.zeros((batch_size, 1))
            )
            discriminator_loss = 0.5 * np.add(
                discriminator_loss_real, discriminator_loss_fake
            )
            # -----------------
            #  Train the generator
            # -----------------
            # Generate a batch of noise vectors
            noise = np.random.normal(0, 1, (batch_size, latent_dim))
            # Train the generator
            generator_loss = gan.train_on_batch(
                [noise, fake_labels], np.ones((batch_size, 1))
            )
            # Print training progress
            print(
                f"Epoch {epoch+1}/{epochs}, Batch {batch+1}/{num_batches}, Discriminator Loss: {discriminator_loss}, Generator Loss: {generator_loss}"
            )


# Train the GAN model
train_gan(
    train_images,
    train_labels_encoded,
    train_class_encoded,
    generator,
    discriminator,
    gan,
    latent_dim,
    num_classes,
    batch_size,
    epochs,
)

latent_vectors = np.random.normal(size=(batch_size, latent_dim))
class_labels = np.random.randint(0, num_classes, size=(batch_size, 1))
real_images_expanded = np.expand_dims(real_images, axis=-1)
real_images_expanded = np.repeat(real_images_expanded, 3, axis=-1)
fake_images = generator.predict([latent_vectors, class_labels])
print(fake_images.shape)
print(real_images_expanded.shape)
batch_indices = np.random.randint(0, train_images.shape[0], size=batch_size)
real_images = train_images[batch_indices]
real_labels = train_labels_encoded[batch_indices]
print(train_labels_encoded.shape)
print(real_labels.shape)
all_labels = np.concatenate((real_labels, train_labels_encoded), axis=0)
print(all_labels.shape)


def train_gan(
    train_images,
    train_labels_encoded,
    train_class_encoded,
    generator,
    discriminator,
    gan,
    latent_dim,
    num_classes,
    batch_size,
    epochs,
):
    num_batches = train_images.shape[0] // batch_size
    for epoch in range(epochs):
        for batch in range(num_batches):
            # Generate random latent vectors and class labels
            latent_vectors = np.random.normal(size=(batch_size, latent_dim))
            class_labels = np.random.randint(0, num_classes, size=(batch_size, 1))
            # Generate fake images
            fake_images = generator.predict([latent_vectors, class_labels])
            # Select a random batch of real images and their corresponding labels
            batch_indices = np.random.randint(0, train_images.shape[0], size=batch_size)
            real_images = train_images[batch_indices]
            real_labels = train_labels_encoded[batch_indices]
            # print(real_images)
            print(real_labels)
            # print(train)
            # Increase dimensions of real_images
            real_images_expanded = np.expand_dims(real_images, axis=-1)
            real_images_expanded = np.repeat(real_images_expanded, 3, axis=-1)
            # print(real_labels.shape)
            print("class")
            print(train_class_encoded)
            print("train_labels")
            print(train_labels_encoded)
            # Generate batch-sized labels for the GAN
            batch_labels_encoded = np.random.randint(
                0, num_classes, size=(batch_size, 1)
            )
            batch_labels_encoded = tf.keras.utils.to_categorical(
                batch_labels_encoded, num_classes
            )
            # Repeat the labels for each image in the batch
            all_labels = np.repeat(batch_labels_encoded, real_images.shape[0], axis=0)
            # Concatenate real and fake images and labels
            all_images = np.concatenate((real_images_expanded, fake_images), axis=0)
            # all_labels = np.concatenate((real_labels, train_labels_encoded), axis=0)
            # Label smoothing: assign 0.9 for real images and 0.0 for fake images
            real_labels_smoothed = real_labels * 0.9
            fake_labels = np.zeros_like(train_class_encoded)
            # Train the discriminator
            discriminator_loss_real = discriminator.train_on_batch(
                all_images, all_labels
            )
            discriminator_loss_fake = discriminator.train_on_batch(
                fake_images, fake_labels
            )
            discriminator_loss = 0.5 * np.add(
                discriminator_loss_real, discriminator_loss_fake
            )
            # Generate new random latent vectors and class labels
            latent_vectors = np.random.normal(size=(batch_size, latent_dim))
            class_labels = np.random.randint(0, num_classes, size=(batch_size, 1))
            # Update the generator via the whole GAN model
            gan_labels = np.ones_like(class_labels)
            generator_loss = gan.train_on_batch(
                [latent_vectors, class_labels], gan_labels
            )
            # Print the losses for each epoch
            print(
                f"Epoch {epoch+1}/{epochs}, Batch {batch+1}/{num_batches}, Discriminator Loss: {discriminator_loss}, Generator Loss: {generator_loss}"
            )


train_gan(
    train_images,
    train_labels_encoded,
    train_class_encoded,
    generator,
    discriminator,
    gan,
    latent_dim,
    num_classes,
    batch_size,
    epochs,
)

generator.summary()
discriminator.summary()
gan.summary()
generator.save("generator_model.h5")


def generate_images(generator, latent_dim, num_images):
    latent_vectors = generate_latent_vectors(num_images, latent_dim)
    generated_images = generator.predict(latent_vectors)
    return generated_images


num_generated_images = 10
generated_images = generate_images(generator, latent_dim, num_generated_images)

import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 5, figsize=(10, 4))
fig.subplots_adjust(hspace=0.4, wspace=0.4)
for i in range(2):
    for j in range(5):
        axs[i, j].imshow(generated_images[i * 5 + j])
        axs[i, j].axis("off")
plt.show()
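Editor's note: the sampling cell at the end of the notebook above calls generate_latent_vectors, which is never defined, and passes only noise to a generator that was built with two inputs (noise and a class label). A minimal sketch of how that helper and a matching sampling function could look under those assumptions; the helper bodies here are my own and are not part of the original notebook.

import numpy as np

# Hypothetical helper: referenced in the notebook but never defined there.
def generate_latent_vectors(num_images, latent_dim):
    """Draw num_images standard-normal noise vectors of dimension latent_dim."""
    return np.random.normal(size=(num_images, latent_dim))


def generate_conditional_images(generator, latent_dim, num_classes, num_images):
    """Sample images from a conditional generator that takes [noise, label] inputs."""
    latent_vectors = generate_latent_vectors(num_images, latent_dim)
    # The conditional generators above expect one integer class label per sample
    class_labels = np.random.randint(0, num_classes, size=(num_images, 1))
    return generator.predict([latent_vectors, class_labels])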
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264082.ipynb
t2idataset
shaikkhais
[{"Id": 129264082, "ScriptId": 38301912, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11108041, "CreationDate": "05/12/2023 09:29:53", "VersionNumber": 2.0, "Title": "T2Isyn2", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 681.0, "LinesInsertedFromPrevious": 449.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 232.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185153205, "KernelVersionId": 129264082, "SourceDatasetVersionId": 5345789}, {"Id": 185153206, "KernelVersionId": 129264082, "SourceDatasetVersionId": 5579896}]
[{"Id": 5345789, "DatasetId": 3103855, "DatasourceVersionId": 5419191, "CreatorUserId": 11108041, "LicenseName": "Unknown", "CreationDate": "04/08/2023 10:37:53", "VersionNumber": 1.0, "Title": "T2Idataset", "Slug": "t2idataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3103855, "CreatorUserId": 11108041, "OwnerUserId": 11108041.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5345789.0, "CurrentDatasourceVersionId": 5419191.0, "ForumId": 3167119, "Type": 2, "CreationDate": "04/08/2023 10:37:53", "LastActivityDate": "04/08/2023", "TotalViews": 99, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 0}]
[{"Id": 11108041, "UserName": "shaikkhais", "DisplayName": "Shaik Khais", "RegisterDate": "07/21/2022", "PerformanceTier": 0}]
false
0
8,812
0
8,834
8,812
129264230
<jupyter_start><jupyter_text>Corona virus latest data 2023 The WHO coronavirus (COVID-19) dashboard presents official daily counts of COVID-19 cases, deaths and vaccine utilisation reported by countries, territories and areas. Through this dashboard, we aim to provide a frequently updated data visualization, data dissemination and data exploration resource, while linking users to other useful and informative resources. Caution must be taken when interpreting all data presented, and differences between information products published by WHO, national public health authorities, and other sources using different inclusion criteria and different data cut-off times are to be expected. While steps are taken to ensure accuracy and reliability, all data are subject to continuous verification and change. All counts are subject to variations in case detection, definitions, laboratory testing, vaccination strategy, and reporting strategies. Other important considerations are highlighted under the respective Data Sources below. The designations employed and the presentation of these materials do not imply the expression of any opinion whatsoever on the part of WHO concerning the legal status of any country, territory or area or of its authorities, or concerning the delimitation of its frontiers or boundaries. Dotted and dashed lines on maps represent approximate border lines for which there may not yet be full agreement. [1] All references to Kosovo should be understood to be in the context of the United Nations Security Council resolution 1244 (1999). [2] A dispute exists between the Governments of Argentina and the United Kingdom of Great Britain and Northern Ireland concerning sovereignty over the Falkland Islands (Malvinas). Data for Bonaire, Sint Eustatius and Saba have been disaggregated and displayed at the subnational level. Kaggle dataset identifier: corona-virus-latest-data-2023 <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Сoronavirus dataset analysis # Import libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set_style("darkgrid") # EDA data = pd.read_csv( "/kaggle/input/corona-virus-latest-data-2023/corona_virus.csv", encoding="ISO-8859-1", ) data.head() data.info() # Warning! In the column Tot Cases/1M pop there is an invisible symbol xA0. 
I used Notepad++ to copy the title data = data.rename(columns={"Tot Cases/1M pop": "TotalCases/1MPOP"}) # delete the results, they interfere with the analysis data = data.drop(index=231) data.columns # Remove commas and cast all values to float convert_feature = [ "Total Cases", "New Cases", "Total Deaths", "New Deaths", "Total Recovered", "New Recovered", "Active Cases", "Serious,Critical", "Deaths/1M pop", "Total Tests", "Tests/1M pop", "Population", "TotalCases/1MPOP", ] data = data.replace(to_replace=",", value="", regex=True) data[convert_feature] = data[convert_feature].astype(float) data.info() # now you can return the normal value to the column data = data.rename(columns={"TotalCases/1MPOP": "Total Cases/1M Pop"}) data.head() # there are missing values data = data.fillna(0) # Visualization top_cases = data.sort_values("Total Cases", ascending=False).head(10) sns.catplot( data=top_cases, x="Country,Other", y="Total Cases", kind="bar", height=7 ).set(title="Top 10 Cases Coronavirus") plt.show() top_death = data.sort_values("Total Deaths", ascending=False).head(10) sns.catplot( data=top_death, x="Country,Other", y="Total Deaths", kind="bar", height=7 ).set(title="Top 10 Deaths from Coronavirus") plt.show() top_recov = data.sort_values("Total Recovered", ascending=False).head(10) sns.catplot( data=top_recov, x="Country,Other", y="Total Recovered", kind="bar", height=7 ).set(title="Top 10 Recovered") plt.show() top_active = data.sort_values("Active Cases", ascending=False).head(10) sns.catplot( data=top_active, x="Country,Other", y="Active Cases", kind="bar", height=7 ).set(title="Top 10 Active Cases") plt.show() # # We see the USA lead in cases and deaths from coronavirus, as well as in those who have recovered. # This is probably due to the fact that in the United States it is better to collect statistics and put them in the public domain. # Japan leads in the active cases. Bad job, Japan. # top_cases_m = data.sort_values("Total Cases/1M Pop", ascending=False).head(10) sns.catplot( data=top_cases_m, x="Total Cases/1M Pop", y="Country,Other", kind="bar", height=6 ).set(title="Top 10 Total Cases/1M Pop") plt.show() # High population density means more cases top_death_m = data.sort_values("Deaths/1M pop", ascending=False).head(10) sns.catplot( data=top_death_m, x="Deaths/1M pop", y="Country,Other", kind="bar", height=6 ).set(title="Top 10 Deaths/1M pop") plt.show() # High mortality in poor countries. top_test = data.sort_values("Total Tests", ascending=False).head(10) sns.catplot( data=top_test, x="Total Tests", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Total Tests") plt.show() # # Rich countries can afford to test their populations to reduce the death toll. 
top_test_m = data.sort_values("Tests/1M pop", ascending=False).head(10) sns.catplot( data=top_test_m, x="Tests/1M pop", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Tests/1M pop") plt.show() # Feature Engineering data["Deaths per cases"] = (data["Total Deaths"] / data["Total Cases"]) * 100 antitop_medical = data.sort_values("Deaths per cases", ascending=False).head(10) sns.catplot( data=antitop_medical, x="Deaths per cases", y="Country,Other", kind="bar", height=7 ).set(title="Anti-rating: top 10 deaths per case") plt.show() data["Recovered per cases"] = (data["Total Recovered"] / data["Total Cases"]) * 100 top_medical = data.sort_values("Recovered per cases", ascending=False).head(10) sns.catplot( data=top_medical, x="Recovered per cases", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Recovered per Cases") plt.show()
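The comma-stripping and float conversion earlier in the script above is the step most likely to trip up a reader, since replace(..., regex=True) is applied to the whole frame before the cast. A small self-contained sketch of the same idea; the frame, column names, and numbers here are invented for illustration:

import pandas as pd

# Toy frame with thousands separators stored as strings (invented values).
df = pd.DataFrame({
    "Total Cases": ["1,234,567", "89,012"],
    "Total Deaths": ["12,345", "678"],
})

# Strip the commas everywhere, then cast the numeric columns to float.
df = df.replace(to_replace=",", value="", regex=True)
df[["Total Cases", "Total Deaths"]] = df[["Total Cases", "Total Deaths"]].astype(float)

print(df.dtypes)
print(df)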
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264230.ipynb
corona-virus-latest-data-2023
chitrakumari25
[{"Id": 129264230, "ScriptId": 38430900, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14592729, "CreationDate": "05/12/2023 09:31:03", "VersionNumber": 1.0, "Title": "\u0421oronavirus dataset analysis", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185153523, "KernelVersionId": 129264230, "SourceDatasetVersionId": 5558541}]
[{"Id": 5558541, "DatasetId": 3201332, "DatasourceVersionId": 5633326, "CreatorUserId": 9240831, "LicenseName": "ODC Public Domain Dedication and Licence (PDDL)", "CreationDate": "04/29/2023 16:00:51", "VersionNumber": 1.0, "Title": "Corona virus latest data 2023", "Slug": "corona-virus-latest-data-2023", "Subtitle": "Latest report of corona virus 2023", "Description": "The WHO coronavirus (COVID-19) dashboard presents official daily counts of COVID-19 cases, deaths and vaccine utilisation reported by countries, territories and areas. Through this dashboard, we aim to provide a frequently updated data visualization, data dissemination and data exploration resource, while linking users to other useful and informative resources.\n\nCaution must be taken when interpreting all data presented, and differences between information products published by WHO, national public health authorities, and other sources using different inclusion criteria and different data cut-off times are to be expected. While steps are taken to ensure accuracy and reliability, all data are subject to continuous verification and change. All counts are subject to variations in case detection, definitions, laboratory testing, vaccination strategy, and reporting strategies. Other important considerations are highlighted under the respective Data Sources below.\n\nThe designations employed and the presentation of these materials do not imply the expression of any opinion whatsoever on the part of WHO concerning the legal status of any country, territory or area or of its authorities, or concerning the delimitation of its frontiers or boundaries. Dotted and dashed lines on maps represent approximate border lines for which there may not yet be full agreement.\n\n[1] All references to Kosovo should be understood to be in the context of the United Nations Security Council resolution 1244 (1999).\n\n[2] A dispute exists between the Governments of Argentina and the United Kingdom of Great Britain and Northern Ireland concerning sovereignty over the Falkland Islands (Malvinas).\n\nData for Bonaire, Sint Eustatius and Saba have been disaggregated and displayed at the subnational level.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3201332, "CreatorUserId": 9240831, "OwnerUserId": 9240831.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5558541.0, "CurrentDatasourceVersionId": 5633326.0, "ForumId": 3265987, "Type": 2, "CreationDate": "04/29/2023 16:00:51", "LastActivityDate": "04/29/2023", "TotalViews": 13948, "TotalDownloads": 2964, "TotalVotes": 60, "TotalKernels": 6}]
[{"Id": 9240831, "UserName": "chitrakumari25", "DisplayName": "Chitrakumari", "RegisterDate": "12/23/2021", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Сoronavirus dataset analysis # Import libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set_style("darkgrid") # EDA data = pd.read_csv( "/kaggle/input/corona-virus-latest-data-2023/corona_virus.csv", encoding="ISO-8859-1", ) data.head() data.info() # Warning! In the column Tot Cases/1M pop there is an invisible symbol xA0. I used Notepad++ to copy the title data = data.rename(columns={"Tot Cases/1M pop": "TotalCases/1MPOP"}) # delete the results, they interfere with the analysis data = data.drop(index=231) data.columns # Remove commas and cast all values to float convert_feature = [ "Total Cases", "New Cases", "Total Deaths", "New Deaths", "Total Recovered", "New Recovered", "Active Cases", "Serious,Critical", "Deaths/1M pop", "Total Tests", "Tests/1M pop", "Population", "TotalCases/1MPOP", ] data = data.replace(to_replace=",", value="", regex=True) data[convert_feature] = data[convert_feature].astype(float) data.info() # now you can return the normal value to the column data = data.rename(columns={"TotalCases/1MPOP": "Total Cases/1M Pop"}) data.head() # there are missing values data = data.fillna(0) # Visualization top_cases = data.sort_values("Total Cases", ascending=False).head(10) sns.catplot( data=top_cases, x="Country,Other", y="Total Cases", kind="bar", height=7 ).set(title="Top 10 Cases Coronavirus") plt.show() top_death = data.sort_values("Total Deaths", ascending=False).head(10) sns.catplot( data=top_death, x="Country,Other", y="Total Deaths", kind="bar", height=7 ).set(title="Top 10 Deaths from Coronavirus") plt.show() top_recov = data.sort_values("Total Recovered", ascending=False).head(10) sns.catplot( data=top_recov, x="Country,Other", y="Total Recovered", kind="bar", height=7 ).set(title="Top 10 Recovered") plt.show() top_active = data.sort_values("Active Cases", ascending=False).head(10) sns.catplot( data=top_active, x="Country,Other", y="Active Cases", kind="bar", height=7 ).set(title="Top 10 Active Cases") plt.show() # # We see the USA lead in cases and deaths from coronavirus, as well as in those who have recovered. # This is probably due to the fact that in the United States it is better to collect statistics and put them in the public domain. # Japan leads in the active cases. Bad job, Japan. # top_cases_m = data.sort_values("Total Cases/1M Pop", ascending=False).head(10) sns.catplot( data=top_cases_m, x="Total Cases/1M Pop", y="Country,Other", kind="bar", height=6 ).set(title="Top 10 Total Cases/1M Pop") plt.show() # High population density means more cases top_death_m = data.sort_values("Deaths/1M pop", ascending=False).head(10) sns.catplot( data=top_death_m, x="Deaths/1M pop", y="Country,Other", kind="bar", height=6 ).set(title="Top 10 Deaths/1M pop") plt.show() # High mortality in poor countries. 
top_test = data.sort_values("Total Tests", ascending=False).head(10) sns.catplot( data=top_test, x="Total Tests", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Total Tests") plt.show() # # Rich countries can afford to test their populations to reduce the death toll. top_test_m = data.sort_values("Tests/1M pop", ascending=False).head(10) sns.catplot( data=top_test_m, x="Tests/1M pop", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Tests/1M pop") plt.show() # Feature Engineering data["Deaths per cases"] = (data["Total Deaths"] / data["Total Cases"]) * 100 antitop_medical = data.sort_values("Deaths per cases", ascending=False).head(10) sns.catplot( data=antitop_medical, x="Deaths per cases", y="Country,Other", kind="bar", height=7 ).set(title="Anti-rating: top 10 deaths per case") plt.show() data["Recovered per cases"] = (data["Total Recovered"] / data["Total Cases"]) * 100 top_medical = data.sort_values("Recovered per cases", ascending=False).head(10) sns.catplot( data=top_medical, x="Recovered per cases", y="Country,Other", kind="bar", height=7 ).set(title="Top 10 Recovered per Cases") plt.show()
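The rename earlier in this script works around a non-breaking space (\xa0) hidden in the "Tot Cases/1M pop" header, which the author copied out of Notepad++. The same cleanup can be done programmatically; this is a general sketch with a toy frame, not what the original notebook ran:

import pandas as pd

# Toy frame whose header contains a non-breaking space, mimicking the CSV issue.
df = pd.DataFrame({"Tot\xa0Cases/1M pop": [1000.0, 2000.0]})

# Normalize every column name: replace non-breaking spaces and trim stray whitespace.
df.columns = [c.replace("\xa0", " ").strip() for c in df.columns]

print(list(df.columns))  # ['Tot Cases/1M pop']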
false
1
1,586
0
2,013
1,586
129264879
<jupyter_start><jupyter_text>Human Life Expectancy Around the World ### Context The human lifespan is the maximum number of years an individual from the human species can live based on observed examples. ### Fact The longest verified lifespan for any human is that of Frenchwoman Jeanne Calment, who is verified as having lived to age 122 years. Kaggle dataset identifier: human-life-expectancy-around-the-world <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # I am going to build a model describing the average life expectancy in the world. I will use data (unfortunately unweighted, so it assumes that every country in the world has the same population) for every country in the world for the years 1990-2019. data = pd.read_csv( "/kaggle/input/human-life-expectancy-around-the-world/Human_life_Expectancy.csv" ) data.head() # I keep only country-level data (I drop the regions) to avoid duplication. data.drop(data.loc[data["Level"] != "National"].index, inplace=True) # The data has no NaNs... data.isna().sum() # ...but the columns have the wrong dtype and contain strings... stripped_data = data.drop(columns=["Country", "Country_Code", "Level", "Region"]) stripped_data.head() stripped_data.dtypes # ...so I convert everything to numeric df = stripped_data.apply(pd.to_numeric, errors="coerce") df.dtypes means = df.mean() means_df = means.to_frame() means_df.rename(columns={list(means_df)[0]: "Life_Expectancy"}, inplace=True) means_df.index.names = ["Year"] means_df.head() X = means.keys().astype(int) Y = means.values print(X) print(Y) def model(parametry, x): a, b = parametry return a * x + b a_est, b_est = 0.24, -412 X_test = np.linspace(start=X.min(), stop=X.max()) Y_pred = model(parametry=[a_est, b_est], x=X_test) plt.scatter(X, Y) plt.plot(X_test, Y_pred, color="tab:orange") plt.xlabel("x - year", fontsize=14) plt.ylabel("y - average life expectancy in the world", fontsize=14) plt.show() X_train, X_test, Y_train, Y_test = train_test_split(X, Y) X_train, X_test = X_train.values, X_test.values # ========= Linear model ========= from sklearn.linear_model import LinearRegression model_lin = LinearRegression() model_lin.fit(X_train.reshape(-1, 1), Y_train) print( f"Linear model parameters: {np.round(model_lin.coef_,5)}, {np.round(model_lin.intercept_,5)}" ) MSE_lin = mean_squared_error(Y_test, model_lin.predict(X_test.reshape(-1, 1))) print(f"Mean squared error of the linear model: {MSE_lin:0.3}\n") # ===== Generalized Linear Model =====
from sklearn.preprocessing import PolynomialFeatures model_GLM = LinearRegression() gen_features = PolynomialFeatures(degree=2, include_bias=True, interaction_only=False) model_GLM.fit(gen_features.fit_transform(X_train.reshape(-1, 1)), Y_train) print( f"GLM model parameters: {np.round(model_GLM.coef_,4)}, {np.round(model_GLM.intercept_,5)}" ) MSE_GLM = mean_squared_error( Y_test, model_GLM.predict(gen_features.fit_transform(X_test.reshape(-1, 1))) ) print(f"Mean squared error of the GLM model: {MSE_GLM:0.3}\n") # ==== Support Vector Machine ==== # SVR for regression, SVC for classification from sklearn.svm import SVR model_svr = SVR(kernel="rbf", gamma="scale", C=1) model_svr.fit(X_train.reshape(-1, 1), Y_train) MSE_SVR = mean_squared_error(Y_test, model_svr.predict(X_test.reshape(-1, 1))) print(f"Mean squared error of the SVR model: {MSE_SVR:0.3}") # Decision Tree Regressor from sklearn.tree import DecisionTreeRegressor model_tree = DecisionTreeRegressor() model_tree.fit(X_train.reshape(-1, 1), Y_train) MSE_tree = mean_squared_error(Y_test, model_tree.predict(X_test.reshape(-1, 1))) # Random Forest Regressor from sklearn.ensemble import RandomForestRegressor model_rf = RandomForestRegressor(n_estimators=100, random_state=42) model_rf.fit(X_train.reshape(-1, 1), Y_train) MSE_rf = mean_squared_error(Y_test, model_rf.predict(X_test.reshape(-1, 1))) # Predictions of all models over the full range of the X axis os_x = np.linspace(start=X.min(), stop=X.max() + 10, num=300) y_lin_pred = model_lin.predict(os_x.reshape(-1, 1)) y_GLM_pred = model_GLM.predict(gen_features.fit_transform(os_x.reshape(-1, 1))) y_svr_pred = model_svr.predict(os_x.reshape(-1, 1)) y_tree_pred = model_tree.predict(os_x.reshape(-1, 1)) y_rf_pred = model_rf.predict(os_x.reshape(-1, 1)) # Visualization plt.figure(figsize=(10, 7)) plt.scatter(X_train, Y_train, label="training data", alpha=0.7) plt.scatter(X_test, Y_test, edgecolor="black", facecolor="none", label="test data") plt.plot(os_x, y_lin_pred, label="linear model", color="tab:orange") plt.plot(os_x, y_GLM_pred, label=f"GLM model", color="tab:red") plt.plot(os_x, y_svr_pred, label="SVR model", color="tab:green") plt.plot(os_x, y_tree_pred, label="Decision Tree model", color="tab:blue") plt.plot(os_x, y_rf_pred, label="Random Forest model", color="tab:purple") plt.xlabel("Year", fontsize=14) plt.ylabel("Average life expectancy", fontsize=14) plt.legend(fontsize=12, shadow=True, loc="lower right") plt.ylim([Y.min() - 0.1, Y.max() + 0.5]) plt.show()
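The "GLM" above is effectively a quadratic fit: PolynomialFeatures expands each year into [1, year, year²] and LinearRegression fits the coefficients. A self-contained sketch of that pattern on synthetic data; the trend below reuses the notebook's hand-tuned a_est = 0.24, b_est = -412, and the noise level is invented:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# Synthetic "year vs. life expectancy" series with a mild upward trend (invented noise).
rng = np.random.default_rng(42)
years = np.arange(1990, 2020).reshape(-1, 1).astype(float)
life_exp = 0.24 * years.ravel() - 412 + rng.normal(0, 0.3, size=years.shape[0])

# Degree-2 expansion: each year becomes the row [1, year, year**2].
poly = PolynomialFeatures(degree=2, include_bias=True)
X_poly = poly.fit_transform(years)

model = LinearRegression().fit(X_poly, life_exp)
print(model.coef_, model.intercept_)
print(model.predict(poly.transform([[2025.0]])))  # extrapolate a few years ahead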
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264879.ipynb
human-life-expectancy-around-the-world
deepcontractor
[{"Id": 129264879, "ScriptId": 38405751, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14426903, "CreationDate": "05/12/2023 09:37:11", "VersionNumber": 1.0, "Title": "notebooka2758bec2c", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 140.0, "LinesInsertedFromPrevious": 140.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185154716, "KernelVersionId": 129264879, "SourceDatasetVersionId": 3004133}]
[{"Id": 3004133, "DatasetId": 1840299, "DatasourceVersionId": 3051964, "CreatorUserId": 3682357, "LicenseName": "CC0: Public Domain", "CreationDate": "01/04/2022 15:19:18", "VersionNumber": 1.0, "Title": "Human Life Expectancy Around the World", "Slug": "human-life-expectancy-around-the-world", "Subtitle": "country wise life expectancy of humans (historical data)", "Description": "### Context\n\nThe human lifespan is the maximum number of years an individual from the human species can live based on observed examples. \n\n### Fact\n\nThe longest verified lifespan for any human is that of Frenchwoman Jeanne Calment, who is verified as having lived to age 122 years.\n\n\n### Acknowledgements\n\nDataset [source](https://globaldatalab.org/shdi/lifexp/?levels=1%2B4&interpolation=0&extrapolation=0&nearest_real=0&colour_scales=national)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1840299, "CreatorUserId": 3682357, "OwnerUserId": 3682357.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3004133.0, "CurrentDatasourceVersionId": 3051964.0, "ForumId": 1863139, "Type": 2, "CreationDate": "01/04/2022 15:19:18", "LastActivityDate": "01/04/2022", "TotalViews": 14115, "TotalDownloads": 1738, "TotalVotes": 58, "TotalKernels": 6}]
[{"Id": 3682357, "UserName": "deepcontractor", "DisplayName": "Deep Contractor", "RegisterDate": "09/09/2019", "PerformanceTier": 4}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # I am going to build a model describing the average life expectancy in the world. I will use data (unfortunately unweighted, so it assumes that every country in the world has the same population) for every country in the world for the years 1990-2019. data = pd.read_csv( "/kaggle/input/human-life-expectancy-around-the-world/Human_life_Expectancy.csv" ) data.head() # I keep only country-level data (I drop the regions) to avoid duplication. data.drop(data.loc[data["Level"] != "National"].index, inplace=True) # The data has no NaNs... data.isna().sum() # ...but the columns have the wrong dtype and contain strings... stripped_data = data.drop(columns=["Country", "Country_Code", "Level", "Region"]) stripped_data.head() stripped_data.dtypes # ...so I convert everything to numeric df = stripped_data.apply(pd.to_numeric, errors="coerce") df.dtypes means = df.mean() means_df = means.to_frame() means_df.rename(columns={list(means_df)[0]: "Life_Expectancy"}, inplace=True) means_df.index.names = ["Year"] means_df.head() X = means.keys().astype(int) Y = means.values print(X) print(Y) def model(parametry, x): a, b = parametry return a * x + b a_est, b_est = 0.24, -412 X_test = np.linspace(start=X.min(), stop=X.max()) Y_pred = model(parametry=[a_est, b_est], x=X_test) plt.scatter(X, Y) plt.plot(X_test, Y_pred, color="tab:orange") plt.xlabel("x - year", fontsize=14) plt.ylabel("y - average life expectancy in the world", fontsize=14) plt.show() X_train, X_test, Y_train, Y_test = train_test_split(X, Y) X_train, X_test = X_train.values, X_test.values # ========= Linear model ========= from sklearn.linear_model import LinearRegression model_lin = LinearRegression() model_lin.fit(X_train.reshape(-1, 1), Y_train) print( f"Linear model parameters: {np.round(model_lin.coef_,5)}, {np.round(model_lin.intercept_,5)}" ) MSE_lin = mean_squared_error(Y_test, model_lin.predict(X_test.reshape(-1, 1))) print(f"Mean squared error of the linear model: {MSE_lin:0.3}\n") # ===== Generalized Linear Model ===== from sklearn.preprocessing import PolynomialFeatures model_GLM = LinearRegression() gen_features = PolynomialFeatures(degree=2, include_bias=True, interaction_only=False) model_GLM.fit(gen_features.fit_transform(X_train.reshape(-1, 1)), Y_train) print( f"GLM model parameters: {np.round(model_GLM.coef_,4)}, {np.round(model_GLM.intercept_,5)}" ) MSE_GLM = mean_squared_error( Y_test, model_GLM.predict(gen_features.fit_transform(X_test.reshape(-1, 1))) ) print(f"Mean squared error of the GLM model: {MSE_GLM:0.3}\n") # ==== Support Vector Machine ====
# SVR for regression, SVC for classification from sklearn.svm import SVR model_svr = SVR(kernel="rbf", gamma="scale", C=1) model_svr.fit(X_train.reshape(-1, 1), Y_train) MSE_SVR = mean_squared_error(Y_test, model_svr.predict(X_test.reshape(-1, 1))) print(f"Mean squared error of the SVR model: {MSE_SVR:0.3}") # Decision Tree Regressor from sklearn.tree import DecisionTreeRegressor model_tree = DecisionTreeRegressor() model_tree.fit(X_train.reshape(-1, 1), Y_train) MSE_tree = mean_squared_error(Y_test, model_tree.predict(X_test.reshape(-1, 1))) # Random Forest Regressor from sklearn.ensemble import RandomForestRegressor model_rf = RandomForestRegressor(n_estimators=100, random_state=42) model_rf.fit(X_train.reshape(-1, 1), Y_train) MSE_rf = mean_squared_error(Y_test, model_rf.predict(X_test.reshape(-1, 1))) # Predictions of all models over the full range of the X axis os_x = np.linspace(start=X.min(), stop=X.max() + 10, num=300) y_lin_pred = model_lin.predict(os_x.reshape(-1, 1)) y_GLM_pred = model_GLM.predict(gen_features.fit_transform(os_x.reshape(-1, 1))) y_svr_pred = model_svr.predict(os_x.reshape(-1, 1)) y_tree_pred = model_tree.predict(os_x.reshape(-1, 1)) y_rf_pred = model_rf.predict(os_x.reshape(-1, 1)) # Visualization plt.figure(figsize=(10, 7)) plt.scatter(X_train, Y_train, label="training data", alpha=0.7) plt.scatter(X_test, Y_test, edgecolor="black", facecolor="none", label="test data") plt.plot(os_x, y_lin_pred, label="linear model", color="tab:orange") plt.plot(os_x, y_GLM_pred, label=f"GLM model", color="tab:red") plt.plot(os_x, y_svr_pred, label="SVR model", color="tab:green") plt.plot(os_x, y_tree_pred, label="Decision Tree model", color="tab:blue") plt.plot(os_x, y_rf_pred, label="Random Forest model", color="tab:purple") plt.xlabel("Year", fontsize=14) plt.ylabel("Average life expectancy", fontsize=14) plt.legend(fontsize=12, shadow=True, loc="lower right") plt.ylim([Y.min() - 0.1, Y.max() + 0.5]) plt.show()
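The script above repeats the same fit-then-score pattern for each regressor. A hedged sketch of the same comparison written as a loop, run on synthetic data rather than the Kaggle CSV; the series and noise level are invented, and the model settings mirror the ones used above:

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor

# Synthetic yearly series standing in for the life-expectancy averages (invented values).
rng = np.random.default_rng(0)
X = np.arange(1990, 2020).reshape(-1, 1).astype(float)
y = 0.24 * X.ravel() - 412 + rng.normal(0, 0.3, size=X.shape[0])

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

models = {
    "linear": LinearRegression(),
    "svr": SVR(kernel="rbf", gamma="scale", C=1),
    "tree": DecisionTreeRegressor(random_state=0),
    "forest": RandomForestRegressor(n_estimators=100, random_state=42),
}

# Fit each model and report its test MSE, mirroring MSE_lin / MSE_SVR / MSE_tree / MSE_rf.
for name, model in models.items():
    model.fit(X_train, y_train)
    mse = mean_squared_error(y_test, model.predict(X_test))
    print(f"{name}: MSE = {mse:.3f}")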
false
1
2,053
0
2,159
2,053
129264516
import numpy as np import pandas as pd import pytorch_lightning as pl import matplotlib.pyplot as plt from neuralforecast import NeuralForecast from neuralforecast.models import LSTM from neuralforecast.losses.pytorch import MQLoss, DistributionLoss from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesLoader from neuralforecast.auto import AutoLSTM from neuralforecast.tsdataset import TimeSeriesDataset from neuralforecast.utils import AirPassengersDF as Y_df Y_df.describe() Y_train_df = Y_df[Y_df.ds <= "1959-12-31"] # 132 train Y_test_df = Y_df[Y_df.ds > "1959-12-31"] # 12 test # The data must be in TimeSeriesDataset form dataset_train, *_ = TimeSeriesDataset.from_df(Y_train_df) dataset_test, *_ = TimeSeriesDataset.from_df(Y_test_df) config = dict(max_steps=50, val_check_steps=50, input_size=132, encoder_hidden_size=200) model = AutoLSTM(h=12, config=config, num_samples=50, cpus=2, verbose=True) len(Y_train_df) model.fit(dataset=dataset_train) y_hat = model.predict(dataset=dataset_test) len(y_hat) len(Y_test_df["y"]) from sklearn.metrics import mean_absolute_percentage_error np.sqrt(mean_absolute_percentage_error(Y_test_df["y"], y_hat))
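The last line above takes the square root of the MAPE; the square root normally pairs with squared-error metrics (RMSE), while MAPE is usually reported directly as a percentage. A tiny sketch of both metrics on invented actual/forecast values, standing in for Y_test_df["y"] and y_hat:

import numpy as np
from sklearn.metrics import mean_absolute_percentage_error

# Invented actuals vs. forecasts, roughly AirPassengers-scale numbers.
y_true = np.array([417.0, 391.0, 419.0, 461.0])
y_pred = np.array([405.0, 400.0, 430.0, 455.0])

mape = mean_absolute_percentage_error(y_true, y_pred)  # returned as a fraction
rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))         # sqrt belongs with squared errors

print(f"MAPE: {mape:.2%}")
print(f"RMSE: {rmse:.2f}")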
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264516.ipynb
null
null
[{"Id": 129264516, "ScriptId": 38427712, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8566858, "CreationDate": "05/12/2023 09:33:41", "VersionNumber": 2.0, "Title": "NeuralForecast AutoML", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 42.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 41.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd import pytorch_lightning as pl import matplotlib.pyplot as plt from neuralforecast import NeuralForecast from neuralforecast.models import LSTM from neuralforecast.losses.pytorch import MQLoss, DistributionLoss from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesLoader from neuralforecast.auto import AutoLSTM from neuralforecast.tsdataset import TimeSeriesDataset from neuralforecast.utils import AirPassengersDF as Y_df Y_df.describe() Y_train_df = Y_df[Y_df.ds <= "1959-12-31"] # 132 train Y_test_df = Y_df[Y_df.ds > "1959-12-31"] # 12 test # The data must be in TimeSeriesDataset form dataset_train, *_ = TimeSeriesDataset.from_df(Y_train_df) dataset_test, *_ = TimeSeriesDataset.from_df(Y_test_df) config = dict(max_steps=50, val_check_steps=50, input_size=132, encoder_hidden_size=200) model = AutoLSTM(h=12, config=config, num_samples=50, cpus=2, verbose=True) len(Y_train_df) model.fit(dataset=dataset_train) y_hat = model.predict(dataset=dataset_test) len(y_hat) len(Y_test_df["y"]) from sklearn.metrics import mean_absolute_percentage_error np.sqrt(mean_absolute_percentage_error(Y_test_df["y"], y_hat))
false
0
414
0
414
414
129264916
<jupyter_start><jupyter_text>UCI Bag Of Words Kaggle dataset identifier: uci-bag-of-words <jupyter_script>import numpy as np import pandas as pd import os import random from sklearn.cluster import KMeans from sklearn.decomposition import PCA, TruncatedSVD from sklearn.metrics import silhouette_score from sklearn.metrics.pairwise import cosine_similarity from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer def load_bow_data(data_name): """ Reads a UCI bag-of-words dataset and returns it as a dataframe. """ file_path = os.path.join("..", f"input/uci-bag-of-words/docword.{data_name}.txt") data_frame = pd.read_csv( file_path, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" " ) assert data_frame.isnull().sum().sum() == 0 return data_frame def load_vocab_data(data_name): """ Reads a vocabulary file and determines the ID based on the index. Handles the case where "null" is part of the vocabulary, which pandas reads as np.nan, and sets it back to the string "null". """ file_path = os.path.join("..", f"input/uci-bag-of-words/vocab.{data_name}.txt") data_frame = pd.read_csv(file_path, header=None, names=["word"]).fillna("null") data_frame["wordID"] = data_frame.index + 1 assert data_frame.isnull().sum().sum() == 0 return data_frame enron_data = load_bow_data("enron") enron_vocab_data = load_vocab_data("enron") kos_data = load_bow_data("kos") kos_vocab_data = load_vocab_data("kos") nips_data = load_bow_data("nips") nips_vocab_data = load_vocab_data("nips") enron_data.head() # Count the number of unique documents in each dataset for name, dataset in (("enron", enron_data), ("kos", kos_data), ("nips", nips_data)): print(f"Count {name}: {dataset['docID'].nunique()}") # Unbalanced data set, we reduced enron since it caused us memory/performance issues random.seed(42) selected_doc_ids = random.sample(list(set(enron_data["docID"])), k=15000) enron_data = enron_data[enron_data["docID"].isin(selected_doc_ids)].reset_index( drop=True ) enron_data["docID"].nunique() # Merge three datasets into a single corpus # We need to offset the overlapping IDs data_frames = [] id_offset = 0 for data, vocabulary in ( (enron_data, enron_vocab_data), (kos_data, kos_vocab_data), (nips_data, nips_vocab_data), ): # Update document IDs to avoid overlap updated_ids = data["docID"] + id_offset data["new_id"] = updated_ids # Update the offset for the next dataset id_offset = updated_ids.max() # Merge the data with its corresponding vocabulary merged_data = data.merge(vocabulary)[["new_id", "word", "count"]] # Append the merged data to the list of data frames data_frames.append(merged_data) # We also need to handle the case with dublicate words # def find_removed_and_present_words(data, vocabulary): """ Identifies words that exist in the vocabulary but not in the corpus data. Using set difference, this function calculates the IDs present in the vocabulary but absent in the corpus. Then, it retrieves the corresponding words. Additionally, this function returns a set of words that are present in the corpus for comparison across different corpora. 
""" vocab_word_ids = set(vocabulary["wordID"]) corpus_word_ids = set(data["wordID"]) # Find removed and present word IDs removed_word_ids = vocab_word_ids - corpus_word_ids present_word_ids = corpus_word_ids.intersection(vocab_word_ids) # Retrieve corresponding words from vocabulary removed_words = set(vocabulary[vocabulary["wordID"].isin(removed_word_ids)]["word"]) present_words = set(vocabulary[vocabulary["wordID"].isin(present_word_ids)]["word"]) return removed_words, present_words # Find removed and present words for each dataset removed_enron_words, present_enron_words = find_removed_and_present_words( enron_data, enron_vocab_data ) removed_kos_words, present_kos_words = find_removed_and_present_words( kos_data, kos_vocab_data ) removed_nips_words, present_nips_words = find_removed_and_present_words( nips_data, nips_vocab_data ) # Concatenate the data frames and create a new vocabulary dataframe combined_data = pd.concat(data_frames, ignore_index=True).rename( columns={"new_id": "docID"} ) distinct_words = combined_data["word"].unique() distinct_words.sort() combined_vocab = ( pd.DataFrame({"word": distinct_words}) .reset_index() .rename(columns={"index": "wordID"}) ) combined_data.head() combined_vocab.head() # Associate the 'wordID' from the new vocabulary with the combined corpus combined_data = combined_data.merge(combined_vocab, how="left") combined_data = ( combined_data[["docID", "wordID", "count"]] .sort_values(["docID", "wordID"]) .reset_index(drop=True) ) combined_data.head() # Now all three corpora are combined into a single corpus and vocabulary, where each word and document has its own unique id. # # Create a word-document matrix from this corpus # Create a term-document matrix from the combined corpus tdm = ( combined_data.pivot(index="wordID", columns="docID", values="count") .fillna(0.0) .astype(pd.SparseDtype("float", 0.0)) ) print(tdm) # The fillna(0.0) function replaces any missing values (NaN) with zeros, # and astype(pd.SparseDtype("float", 0.0)) converts the dataframe to a # sparse matrix with a float datatype to save memory, as most of the # values in the matrix would be zero. # # Using SKLearn, find the truncated singular value decomposition of this matrix, retaining the first 100 dimensions # Apply TruncatedSVD to reduce the dimensionality of the term-document matrix svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42) svd_output = svd.fit_transform(tdm) svd_output.shape word_svd = pd.DataFrame(svd_output, index=combined_vocab["word"]) word_svd.head() word_svd.loc[:, 0].describe() # We noticed that many words in this topic are not relevant, but some documents above a certain level seem to be closely related. We can set a threshold and examine the top 20 terms that are most associated with this topic. topic_num = 0 cutoff = word_svd.loc[:, topic_num].quantile(0.75) print(f"Topic: {topic_num}") # Retrieve the top 20 words related to the topic top_words = list( word_svd[word_svd[topic_num] > cutoff] .sort_values([topic_num], ascending=False) .index )[:20] print(top_words) # - Are these dimensions interpretable? # Yes, the dimensions are topics. # - What does dimension 1 represent? # We will merge the SVD array with the actual words and examine dimension 1 (column 0) to interpret the first dimension. # - What do the top 10 dimensions represent? # Top 10 Topics. 
for topic in range(10): threshold = word_svd.loc[:, topic].quantile(0.999) print(f"Topic: {topic}") # Get the top 10 words associated with the topic words = list( word_svd[word_svd[topic] > threshold] .sort_values([topic], ascending=False) .index )[:20] print(words) # # Determine the average cosine similarity between documents within in each corpus. Next, determine the average cosine similarity between documents across corpora # # Read in the Enron dataset and reduce its size enron = load_bow_data("enron") enron = enron[enron["docID"].isin(selected_doc_ids)].reset_index(drop=True) # Read in the Kos and NIPS datasets kos = load_bow_data("kos") nips = load_bow_data("nips") # Initialize a list to store average cosine similarities average_cos_sims = [] # Create term-document matrices for Enron, Kos, and NIPS datasets and calculate cosine similarities datasets = [("enron", enron), ("kos", kos), ("nips", nips)] for name, data in datasets: tdm = ( data.pivot(index="docID", columns="wordID", values="count") .fillna(0) .astype(pd.SparseDtype("int16", 0)) ) similarities = cosine_similarity(tdm, dense_output=False) average_cos_sims.append((name, similarities.mean())) # Create term-document matrix for the combined dataset and calculate cosine similarities combined_tdm = ( combined_data.pivot(index="docID", columns="wordID", values="count") .fillna(0) .astype(pd.SparseDtype("int16", 0)) ) combined_sims = cosine_similarity(combined_tdm, dense_output=False) average_cos_sims.append(("all", combined_sims.mean())) # Create a dataframe to display the average cosine similarities for each corpus cosine_sims_df = pd.DataFrame( average_cos_sims, columns=["corpus", "average_cosine_similarity"] ) cosine_sims_df cosine_sims_df # # LSA from sklearn.cluster import KMeans tdm = ( combined_data.pivot(index="wordID", columns="docID", values="count") .fillna(0.0) .astype(pd.SparseDtype("float", 0.0)) ) dtm = tdm.T dtm # lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False)) # X_lsa = lsa.fit_transform(dtm) # X_lsa = lsa.fit_transform(dtm) # explained_variance = lsa[0].explained_variance_ratio_.sum() # print(f"Explained variance of the SVD step: {explained_variance * 100:.1f}%") # Create an LSA pipeline with TruncatedSVD and Normalizer lsa_pipeline = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False)) # Apply the LSA pipeline to the DTM X_lsa_transformed = lsa_pipeline.fit_transform(dtm) # Calculate the explained variance total_explained_variance = lsa_pipeline[0].explained_variance_ratio_.sum() n_clusters = 7 kmeans = KMeans(n_clusters=n_clusters).fit(X_lsa_transformed) # Assign labels to the DTM labels = kmeans.labels_ dtm["label"] = labels # Display the number of documents in each cluster dtm["label"].value_counts() # Print the top 10 words for each cluster for cluster_num in range(n_clusters): print(f"Cluster: {cluster_num}") current_cluster = dtm[dtm["label"] == cluster_num] top_ten_words = set( current_cluster.sum().sort_values(ascending=False).head(10).index ) print(combined_vocab[combined_vocab["wordID"].isin(top_ten_words)]["word"].tolist()) # # PCA # Create a document-term matrix dtm = combined_data.pivot(index="wordID", columns="docID", values="count").fillna(0.0) # Perform PCA on the DTM pca = PCA(n_components=100) pca_transformed = pca.fit_transform(dtm) # Create a DataFrame of PCA components and words word_pca = pd.DataFrame(pca_transformed, index=combined_vocab["word"]) # Extract the top 10 words for each of the first 10 components for component in range(10): threshold 
= word_pca.loc[:, component].quantile(0.999) print(f"Component: {component}") # Get the top 10 words associated with the component words = list( word_pca[word_pca[component] > threshold] .sort_values([component], ascending=False) .index )[:20] print(words)
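The TruncatedSVD step above is the core of the notebook's LSA: the sparse term-document matrix is projected onto a small number of latent "topic" dimensions, and each term gets a weight per topic. A minimal sketch of the same call on a toy matrix; the counts below are invented, and only 2 components are kept because the data is tiny:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.decomposition import TruncatedSVD

# Toy term-document count matrix: 6 terms x 5 documents (invented counts).
counts = np.array([
    [3, 0, 1, 0, 0],
    [2, 0, 0, 1, 0],
    [0, 4, 0, 0, 1],
    [0, 3, 1, 0, 0],
    [0, 0, 0, 5, 2],
    [1, 0, 0, 4, 3],
])
tdm = csr_matrix(counts)

# Keep 2 latent dimensions instead of the notebook's 100 (toy-sized data).
svd = TruncatedSVD(n_components=2, n_iter=7, random_state=42)
term_topics = svd.fit_transform(tdm)  # one row of topic weights per term

print(term_topics.shape)              # (6, 2)
print(svd.explained_variance_ratio_)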
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/264/129264916.ipynb
uci-bag-of-words
aslanovmustafa
[{"Id": 129264916, "ScriptId": 38310557, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14319122, "CreationDate": "05/12/2023 09:37:36", "VersionNumber": 1.0, "Title": "Asad/Turqay Project2", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 271.0, "LinesInsertedFromPrevious": 271.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185154763, "KernelVersionId": 129264916, "SourceDatasetVersionId": 3639953}]
[{"Id": 3639953, "DatasetId": 2180053, "DatasourceVersionId": 3693638, "CreatorUserId": 8278966, "LicenseName": "Unknown", "CreationDate": "05/15/2022 12:47:05", "VersionNumber": 1.0, "Title": "UCI Bag Of Words", "Slug": "uci-bag-of-words", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2180053, "CreatorUserId": 8278966, "OwnerUserId": 8278966.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3639953.0, "CurrentDatasourceVersionId": 3693638.0, "ForumId": 2206001, "Type": 2, "CreationDate": "05/15/2022 12:47:05", "LastActivityDate": "05/15/2022", "TotalViews": 587, "TotalDownloads": 68, "TotalVotes": 1, "TotalKernels": 15}]
[{"Id": 8278966, "UserName": "aslanovmustafa", "DisplayName": "Mustafa Aslanov", "RegisterDate": "09/04/2021", "PerformanceTier": 0}]
import numpy as np import pandas as pd import os import random from sklearn.cluster import KMeans from sklearn.decomposition import PCA, TruncatedSVD from sklearn.metrics import silhouette_score from sklearn.metrics.pairwise import cosine_similarity from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer def load_bow_data(data_name): """ Reads a UCI bag-of-words dataset and returns it as a dataframe. """ file_path = os.path.join("..", f"input/uci-bag-of-words/docword.{data_name}.txt") data_frame = pd.read_csv( file_path, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" " ) assert data_frame.isnull().sum().sum() == 0 return data_frame def load_vocab_data(data_name): """ Reads a vocabulary file and determines the ID based on the index. Handles the case where "null" is part of the vocabulary, which pandas reads as np.nan, and sets it back to the string "null". """ file_path = os.path.join("..", f"input/uci-bag-of-words/vocab.{data_name}.txt") data_frame = pd.read_csv(file_path, header=None, names=["word"]).fillna("null") data_frame["wordID"] = data_frame.index + 1 assert data_frame.isnull().sum().sum() == 0 return data_frame enron_data = load_bow_data("enron") enron_vocab_data = load_vocab_data("enron") kos_data = load_bow_data("kos") kos_vocab_data = load_vocab_data("kos") nips_data = load_bow_data("nips") nips_vocab_data = load_vocab_data("nips") enron_data.head() # Count the number of unique documents in each dataset for name, dataset in (("enron", enron_data), ("kos", kos_data), ("nips", nips_data)): print(f"Count {name}: {dataset['docID'].nunique()}") # Unbalanced data set, we reduced enron since it caused us memory/performance issues random.seed(42) selected_doc_ids = random.sample(list(set(enron_data["docID"])), k=15000) enron_data = enron_data[enron_data["docID"].isin(selected_doc_ids)].reset_index( drop=True ) enron_data["docID"].nunique() # Merge three datasets into a single corpus # We need to offset the overlapping IDs data_frames = [] id_offset = 0 for data, vocabulary in ( (enron_data, enron_vocab_data), (kos_data, kos_vocab_data), (nips_data, nips_vocab_data), ): # Update document IDs to avoid overlap updated_ids = data["docID"] + id_offset data["new_id"] = updated_ids # Update the offset for the next dataset id_offset = updated_ids.max() # Merge the data with its corresponding vocabulary merged_data = data.merge(vocabulary)[["new_id", "word", "count"]] # Append the merged data to the list of data frames data_frames.append(merged_data) # We also need to handle the case with dublicate words # def find_removed_and_present_words(data, vocabulary): """ Identifies words that exist in the vocabulary but not in the corpus data. Using set difference, this function calculates the IDs present in the vocabulary but absent in the corpus. Then, it retrieves the corresponding words. Additionally, this function returns a set of words that are present in the corpus for comparison across different corpora. 
""" vocab_word_ids = set(vocabulary["wordID"]) corpus_word_ids = set(data["wordID"]) # Find removed and present word IDs removed_word_ids = vocab_word_ids - corpus_word_ids present_word_ids = corpus_word_ids.intersection(vocab_word_ids) # Retrieve corresponding words from vocabulary removed_words = set(vocabulary[vocabulary["wordID"].isin(removed_word_ids)]["word"]) present_words = set(vocabulary[vocabulary["wordID"].isin(present_word_ids)]["word"]) return removed_words, present_words # Find removed and present words for each dataset removed_enron_words, present_enron_words = find_removed_and_present_words( enron_data, enron_vocab_data ) removed_kos_words, present_kos_words = find_removed_and_present_words( kos_data, kos_vocab_data ) removed_nips_words, present_nips_words = find_removed_and_present_words( nips_data, nips_vocab_data ) # Concatenate the data frames and create a new vocabulary dataframe combined_data = pd.concat(data_frames, ignore_index=True).rename( columns={"new_id": "docID"} ) distinct_words = combined_data["word"].unique() distinct_words.sort() combined_vocab = ( pd.DataFrame({"word": distinct_words}) .reset_index() .rename(columns={"index": "wordID"}) ) combined_data.head() combined_vocab.head() # Associate the 'wordID' from the new vocabulary with the combined corpus combined_data = combined_data.merge(combined_vocab, how="left") combined_data = ( combined_data[["docID", "wordID", "count"]] .sort_values(["docID", "wordID"]) .reset_index(drop=True) ) combined_data.head() # Now all three corpora are combined into a single corpus and vocabulary, where each word and document has its own unique id. # # Create a word-document matrix from this corpus # Create a term-document matrix from the combined corpus tdm = ( combined_data.pivot(index="wordID", columns="docID", values="count") .fillna(0.0) .astype(pd.SparseDtype("float", 0.0)) ) print(tdm) # The fillna(0.0) function replaces any missing values (NaN) with zeros, # and astype(pd.SparseDtype("float", 0.0)) converts the dataframe to a # sparse matrix with a float datatype to save memory, as most of the # values in the matrix would be zero. # # Using SKLearn, find the truncated singular value decomposition of this matrix, retaining the first 100 dimensions # Apply TruncatedSVD to reduce the dimensionality of the term-document matrix svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42) svd_output = svd.fit_transform(tdm) svd_output.shape word_svd = pd.DataFrame(svd_output, index=combined_vocab["word"]) word_svd.head() word_svd.loc[:, 0].describe() # We noticed that many words in this topic are not relevant, but some documents above a certain level seem to be closely related. We can set a threshold and examine the top 20 terms that are most associated with this topic. topic_num = 0 cutoff = word_svd.loc[:, topic_num].quantile(0.75) print(f"Topic: {topic_num}") # Retrieve the top 20 words related to the topic top_words = list( word_svd[word_svd[topic_num] > cutoff] .sort_values([topic_num], ascending=False) .index )[:20] print(top_words) # - Are these dimensions interpretable? # Yes, the dimensions are topics. # - What does dimension 1 represent? # We will merge the SVD array with the actual words and examine dimension 1 (column 0) to interpret the first dimension. # - What do the top 10 dimensions represent? # Top 10 Topics. 
for topic in range(10): threshold = word_svd.loc[:, topic].quantile(0.999) print(f"Topic: {topic}") # Get the top 10 words associated with the topic words = list( word_svd[word_svd[topic] > threshold] .sort_values([topic], ascending=False) .index )[:20] print(words) # # Determine the average cosine similarity between documents within in each corpus. Next, determine the average cosine similarity between documents across corpora # # Read in the Enron dataset and reduce its size enron = load_bow_data("enron") enron = enron[enron["docID"].isin(selected_doc_ids)].reset_index(drop=True) # Read in the Kos and NIPS datasets kos = load_bow_data("kos") nips = load_bow_data("nips") # Initialize a list to store average cosine similarities average_cos_sims = [] # Create term-document matrices for Enron, Kos, and NIPS datasets and calculate cosine similarities datasets = [("enron", enron), ("kos", kos), ("nips", nips)] for name, data in datasets: tdm = ( data.pivot(index="docID", columns="wordID", values="count") .fillna(0) .astype(pd.SparseDtype("int16", 0)) ) similarities = cosine_similarity(tdm, dense_output=False) average_cos_sims.append((name, similarities.mean())) # Create term-document matrix for the combined dataset and calculate cosine similarities combined_tdm = ( combined_data.pivot(index="docID", columns="wordID", values="count") .fillna(0) .astype(pd.SparseDtype("int16", 0)) ) combined_sims = cosine_similarity(combined_tdm, dense_output=False) average_cos_sims.append(("all", combined_sims.mean())) # Create a dataframe to display the average cosine similarities for each corpus cosine_sims_df = pd.DataFrame( average_cos_sims, columns=["corpus", "average_cosine_similarity"] ) cosine_sims_df cosine_sims_df # # LSA from sklearn.cluster import KMeans tdm = ( combined_data.pivot(index="wordID", columns="docID", values="count") .fillna(0.0) .astype(pd.SparseDtype("float", 0.0)) ) dtm = tdm.T dtm # lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False)) # X_lsa = lsa.fit_transform(dtm) # X_lsa = lsa.fit_transform(dtm) # explained_variance = lsa[0].explained_variance_ratio_.sum() # print(f"Explained variance of the SVD step: {explained_variance * 100:.1f}%") # Create an LSA pipeline with TruncatedSVD and Normalizer lsa_pipeline = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False)) # Apply the LSA pipeline to the DTM X_lsa_transformed = lsa_pipeline.fit_transform(dtm) # Calculate the explained variance total_explained_variance = lsa_pipeline[0].explained_variance_ratio_.sum() n_clusters = 7 kmeans = KMeans(n_clusters=n_clusters).fit(X_lsa_transformed) # Assign labels to the DTM labels = kmeans.labels_ dtm["label"] = labels # Display the number of documents in each cluster dtm["label"].value_counts() # Print the top 10 words for each cluster for cluster_num in range(n_clusters): print(f"Cluster: {cluster_num}") current_cluster = dtm[dtm["label"] == cluster_num] top_ten_words = set( current_cluster.sum().sort_values(ascending=False).head(10).index ) print(combined_vocab[combined_vocab["wordID"].isin(top_ten_words)]["word"].tolist()) # # PCA # Create a document-term matrix dtm = combined_data.pivot(index="wordID", columns="docID", values="count").fillna(0.0) # Perform PCA on the DTM pca = PCA(n_components=100) pca_transformed = pca.fit_transform(dtm) # Create a DataFrame of PCA components and words word_pca = pd.DataFrame(pca_transformed, index=combined_vocab["word"]) # Extract the top 10 words for each of the first 10 components for component in range(10): threshold 
= word_pca.loc[:, component].quantile(0.999) print(f"Component: {component}") # Get the top 10 words associated with the component words = list( word_pca[word_pca[component] > threshold] .sort_values([component], ascending=False) .index )[:20] print(words)
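The average-cosine-similarity comparison above reduces each corpus to one number: the mean over all entries of the pairwise similarity matrix, diagonal included, exactly what similarities.mean() computes. A small sketch of that computation on an invented document-term matrix:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity

# Invented document-term counts: 4 documents x 5 terms.
dtm = csr_matrix(np.array([
    [2, 0, 1, 0, 0],
    [1, 1, 0, 0, 0],
    [0, 0, 0, 3, 1],
    [0, 0, 1, 2, 2],
]))

# Pairwise cosine similarities between documents; dense_output=False keeps the result sparse.
sims = cosine_similarity(dtm, dense_output=False)

# Average over all pairs (the diagonal of 1s is included, as in the notebook).
print(sims.mean())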
false
0
3,233
0
3,261
3,233
129213581
# # Introduction # Sometimes you're learning about data visualization and you discover a cool plot, you rush to find out how to do it in Python, you open your notebook in great excitement, then you remember that you need to find a dataset to test a model on, and at that moment you lose all your energy remembering all the datasets you know and guessing which one fits the plot you want to make; well, if you have had this experience, I can feel you. This notebook will provide a quicker solution that gives you the ability to generate your own data according to your needs, using various methods. # These methods are also helpful if you're looking for specific tabular data for testing when you're learning a new model. # # Contents : # ### 1 - Sklearn # ### 2 - Faker # ### 3 - DrawData # # 1 - Generating Synthetic Datasets using Scikit-Learn # Scikit-learn has 3 great methods to build data of different types import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification, make_blobs, make_regression # ### 1.1 - Generating a Classification Dataset # The make_classification function generates a random n-class classification problem. We can use this function to generate a synthetic dataset for testing classification models. It creates a dataset by drawing samples from a random Gaussian distribution. The function takes several parameters: # - n_samples: the number of samples to generate # - n_features: the number of features in each sample # - n_classes: the number of classes in the dataset # - n_clusters_per_class: the number of clusters per class # - n_informative: the number of informative features # The generated dataset is a set of points in a high-dimensional space, where the coordinates of each point represent the features of the sample. The points are labeled with integers corresponding to their class. # Let's generate a dataset with 200 samples and 2 classes: X, y = make_classification( n_samples=200, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, weights=(0.3,), random_state=110, ) # Now, let's visualize the dataset: plt.scatter(X[:, 0], X[:, 1], c=y) plt.xlabel("Feature 1") plt.ylabel("Feature 2") plt.title("Make Classification Dataset") plt.show()
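The import line above also brings in make_blobs and make_regression, which this excerpt never reaches. A short sketch of make_blobs in the same plotting style as the classification example; the cluster count, spread, and seed are chosen arbitrarily for illustration:

import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

# Three Gaussian clusters in 2D, handy for testing clustering or plotting ideas.
X, y = make_blobs(n_samples=200, centers=3, n_features=2, cluster_std=1.2, random_state=42)

plt.scatter(X[:, 0], X[:, 1], c=y)
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.title("Make Blobs Dataset")
plt.show()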
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/213/129213581.ipynb
null
null
[{"Id": 129213581, "ScriptId": 38414490, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9496454, "CreationDate": "05/11/2023 22:22:37", "VersionNumber": 1.0, "Title": "Generate Data for Visualization/Modeling \ud83d\udcca", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Introduction # Sometimes you're learning about data visualization and you discover a cool plot, you rush to find out how to do it in Python, you open your notebook in great excitement, then you remember that you need to find a dataset to test a model on, and at that moment you lose all your energy remembering all the datasets you know and guessing which one fits the plot you want to make; well, if you have had this experience, I can feel you. This notebook will provide a quicker solution that gives you the ability to generate your own data according to your needs, using various methods. # These methods are also helpful if you're looking for specific tabular data for testing when you're learning a new model. # # Contents : # ### 1 - Sklearn # ### 2 - Faker # ### 3 - DrawData # # 1 - Generating Synthetic Datasets using Scikit-Learn # Scikit-learn has 3 great methods to build data of different types import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification, make_blobs, make_regression # ### 1.1 - Generating a Classification Dataset # The make_classification function generates a random n-class classification problem. We can use this function to generate a synthetic dataset for testing classification models. It creates a dataset by drawing samples from a random Gaussian distribution. The function takes several parameters: # - n_samples: the number of samples to generate # - n_features: the number of features in each sample # - n_classes: the number of classes in the dataset # - n_clusters_per_class: the number of clusters per class # - n_informative: the number of informative features # The generated dataset is a set of points in a high-dimensional space, where the coordinates of each point represent the features of the sample. The points are labeled with integers corresponding to their class. # Let's generate a dataset with 200 samples and 2 classes: X, y = make_classification( n_samples=200, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, weights=(0.3,), random_state=110, ) # Now, let's visualize the dataset: plt.scatter(X[:, 0], X[:, 1], c=y) plt.xlabel("Feature 1") plt.ylabel("Feature 2") plt.title("Make Classification Dataset") plt.show()
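For completeness, a comparable sketch with make_regression, the third generator imported above; the noise level and sample count are arbitrary illustrative choices:

import matplotlib.pyplot as plt
from sklearn.datasets import make_regression

# A single informative feature with Gaussian noise on the target.
X, y = make_regression(n_samples=200, n_features=1, noise=15.0, random_state=42)

plt.scatter(X[:, 0], y)
plt.xlabel("Feature 1")
plt.ylabel("Target")
plt.title("Make Regression Dataset")
plt.show()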
false
0
577
0
577
577
129213651
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sqlite3 conn = sqlite3.connect("../input/scale-model-cars-database/stores (1).db") cursor = conn.cursor() tables = pd.read_sql( """SELECT c.*, ci."Number of attributes" FROM ( SELECT "Customers" as "Name", COUNT(*) AS "Number of Rows" FROM customers) AS c INNER JOIN ( SELECT "Customers" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('customers') ) ci ON ci.Name = c.Name UNION SELECT e.*, ei."Number of attributes" FROM ( SELECT "Employees" as "Name", COUNT(*) AS "Number of Rows" FROM employees) AS e INNER JOIN ( SELECT "Employees" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('employees') ) ei ON ei.Name = e.Name UNION SELECT o.*, oi."Number of attributes" FROM ( SELECT "Offices" as "Name", COUNT(*) AS "Number of Rows" FROM offices) AS o INNER JOIN ( SELECT "Offices" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('offices') ) oi ON oi.Name = o.Name UNION SELECT od.*, odi."Number of attributes" FROM ( SELECT "Orderdetails" as "Name", COUNT(*) AS "Number of Rows" FROM orderdetails) AS od INNER JOIN ( SELECT "Orderdetails" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('orderdetails') ) odi ON odi.Name = od.Name UNION SELECT ord.*, ordi."Number of attributes" FROM ( SELECT "Orders" as "Name", COUNT(*) AS "Number of Rows" FROM orders) AS ord INNER JOIN ( SELECT "Orders" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('orders') ) ordi ON ordi.Name = ord.Name UNION SELECT p.*, pi."Number of attributes" FROM ( SELECT "Payments" as "Name", COUNT(*) AS "Number of Rows" FROM payments) AS p INNER JOIN ( SELECT "Payments" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('payments') ) pi ON pi.Name = p.Name UNION SELECT pl.*, pli."Number of attributes" FROM ( SELECT "Productlines" as "Name", COUNT(*) AS "Number of Rows" FROM productlines) AS pl INNER JOIN ( SELECT "Productlines" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('productlines') ) pli ON pli.Name = pl.Name UNION SELECT pr.*, pir."Number of attributes" FROM ( SELECT "Products" as "Name", COUNT(*) AS "Number of Rows" FROM products) AS pr INNER JOIN ( SELECT "Products" as "Name", COUNT(*) as "Number of attributes" FROM PRAGMA_TABLE_INFO('products') ) pir ON pir.Name = pr.Name;""", conn, ) tables
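# Editorial note: the UNION query above repeats the same row-count/column-count pattern for every table. A more compact alternative (a sketch only, assuming the same conn to stores (1).db is still open) iterates over sqlite_master and assembles the summary in pandas.
import pandas as pd

table_names = pd.read_sql("SELECT name FROM sqlite_master WHERE type = 'table'", conn)["name"]
summary = pd.DataFrame(
    [
        {
            "Name": t,
            "Number of Rows": pd.read_sql(f'SELECT COUNT(*) AS n FROM "{t}"', conn)["n"][0],
            "Number of attributes": pd.read_sql(
                f"SELECT COUNT(*) AS n FROM PRAGMA_TABLE_INFO('{t}')", conn
            )["n"][0],
        }
        for t in table_names
    ]
)
summary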
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/213/129213651.ipynb
null
null
[{"Id": 129213651, "ScriptId": 38414340, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15006812, "CreationDate": "05/11/2023 22:23:49", "VersionNumber": 1.0, "Title": "Customers and products analysis SQL", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
843
0
843
843
129213121
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import zipfile from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso, Ridge, ElasticNet from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_log_error from sklearn.ensemble import RandomForestRegressor t = zipfile.ZipFile("/kaggle/input/sberbank-russian-housing-market/train.csv.zip") t.extractall() df = pd.read_csv("/kaggle/working/train.csv") # Fill in the missing values num_cols = df.select_dtypes(include=["number"]).columns cat_cols = df.select_dtypes(include=["object"]).columns for col in num_cols: df[col] = df[col].fillna(df[col].mean()) for col in cat_cols: df[col] = df[col].fillna(df[col].mode().iloc[0]) """df2 = df for x in num_cols: if df2[x].isna().mean() > 0.2: print(x, ' \t \t', df2[x].isna().mean()*100) df2 = df2.drop(x,axis=1)""" from sklearn.preprocessing import LabelEncoder """for f in df.columns: if df[f].dtype=='object': lbl = LabelEncoder() lbl.fit(list(df[f].values)) df[f] = lbl.transform(list(df[f].values))""" df = df.interpolate() df[(df["floor"]) == 33] df.drop(df.index[7457], inplace=True) # Columns to be considered and the target column X = df[ ["full_sq", "life_sq", "floor", "max_floor", "material", "build_year", "num_room"] ] y = np.log(df.price_doc) X.columns # Splitting the data into train and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) import pandas as pd num_linhas = len(df) print("The file 'dados.csv' contains", num_linhas, "rows.") """modelo = ElasticNet(alpha=10000) modelo.fit(X_train, y_train)""" modelo = Lasso() modelo.fit(X_train, y_train) y_pred = modelo.predict(X_train) rmsle = mean_squared_log_error(y_train, y_pred) ** 0.5 print("RMSLE:", rmsle) import zipfile z = zipfile.ZipFile("/kaggle/input/sberbank-russian-housing-market/test.csv.zip") z.extractall() df_test = pd.read_csv("/kaggle/working/test.csv") # Converting categories into numeric values from sklearn.preprocessing import LabelEncoder for f in df_test.columns: if df_test[f].dtype == "object": lbl = LabelEncoder() lbl.fit(list(df_test[f].values)) df_test[f] = lbl.transform(list(df_test[f].values)) # Interpolation to remove null values import pandas as pd import numpy as np for col in df_test.columns: if df_test[col].isnull().sum() > 0: mean = df_test[col].mean() df_test[col] = df_test[col].fillna(mean) X = df_test[ ["full_sq", "life_sq", "floor", "max_floor", "material", "build_year", "num_room"] ] print(len(df_test.id)) print(len(y_pred)) import pandas as pd num_linhas = len(df_test) print("The file 'dados.csv' contains", num_linhas, "rows.") y_pred = modelo.predict(X_test) """y_pred = np.exp(y_pred)""" print(len(df_test.id)) print(len(y_pred)) output = pd.DataFrame({"id": df_test.id, "price_doc": y_pred}) output.to_csv("submission.csv", index=False)
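# Editorial note: in the cell above, the submission predictions come from modelo.predict(X_test), i.e. the held-out 30% of the training data, while the ids come from df_test, so the two columns have different lengths and the target is still on the log scale. A minimal corrected sketch, assuming the fitted scaler and modelo objects from above are still in scope:
# Build the submission from the actual competition test set
X_submit = df_test[
    ["full_sq", "life_sq", "floor", "max_floor", "material", "build_year", "num_room"]
]
X_submit = scaler.transform(X_submit)  # reuse the scaler fitted on the training features
test_pred = np.exp(modelo.predict(X_submit))  # undo the log applied to price_doc before training
output = pd.DataFrame({"id": df_test.id, "price_doc": test_pred})
output.to_csv("submission.csv", index=False)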
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/213/129213121.ipynb
null
null
[{"Id": 129213121, "ScriptId": 38281692, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14612488, "CreationDate": "05/11/2023 22:14:03", "VersionNumber": 4.0, "Title": "Dicas para uso de Arvore de Decis\u00e3o", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 155.0, "LinesInsertedFromPrevious": 85.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 70.0, "LinesInsertedFromFork": 133.0, "LinesDeletedFromFork": 151.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 22.0, "TotalVotes": 0}]
null
null
null
null
false
0
1,393
0
1,393
1,393
129387462
<jupyter_start><jupyter_text>pytorch-tabnet # Pytorch-Tabnet Check out the project here: Kaggle dataset identifier: pytorchtabnet <jupyter_script>import numpy as np import pandas as pd from sklearn.metrics import log_loss, f1_score from sklearn.model_selection import KFold from sklearn.preprocessing import LabelEncoder train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") sample_sub = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) encoder = LabelEncoder() train["EJ"] = encoder.fit_transform(train["EJ"]) test["EJ"] = encoder.fit_transform(test["EJ"]) train = train.dropna().reset_index(drop=True) train["kfold"] = -1 kf = KFold(n_splits=5, shuffle=True, random_state=42) for fold, (train_indicies, valid_indicies) in enumerate(kf.split(X=train)): train.loc[valid_indicies, "kfold"] = fold features_cols = train.columns[1:-2] label = train.columns[-2] import lightgbm as lgb final_valid_predictions = {} final_test_predictions = [] scores = [] s = [] for k in range(5): train_df = train[train["kfold"] != k].reset_index(drop=True) val_df = train[train["kfold"] == k].reset_index(drop=True) valid_ids = val_df.Id.values.tolist() X_train = train_df[features_cols].values y_train = train_df[label].values X_val = val_df[features_cols].values y_val = val_df[label].values model = lgb.LGBMClassifier() model.fit(X_train, y_train) preds_valid = model.predict_proba(X_val) preds_test = model.predict_proba(test[features_cols].values) final_test_predictions.append(preds_test) final_valid_predictions.update(dict(zip(valid_ids, preds_valid))) logloss = log_loss(y_val, preds_valid) s.append(logloss) print(k, logloss) print(s) print(np.mean(s), np.std(s)) final_valid_predictions = pd.DataFrame.from_dict( final_valid_predictions, orient="index" ).reset_index() final_valid_predictions.columns = ["Id", "class_0", "class_1"] final_valid_predictions.to_csv(r"oof.csv", index=False) final_test_predictions = ( final_test_predictions[0] + final_test_predictions[1] + final_test_predictions[2] + final_test_predictions[3] + final_test_predictions[4] ) / 5 test_dict = {} test_dict.update(dict(zip(test.Id.values.tolist(), final_test_predictions))) submission = pd.DataFrame.from_dict(test_dict, orient="index").reset_index() submission.columns = ["Id", "class_0", "class_1"] submission.to_csv(r"submission.csv", index=False) submission
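# Editorial note: the script above averages the five fold-level test predictions by summing the list entries explicitly. An equivalent, fold-count-agnostic form (a sketch, assuming final_test_predictions is still the list built in the loop):
import numpy as np

# Element-wise mean over the five (n_test, 2) probability arrays
avg_test_predictions = np.mean(final_test_predictions, axis=0)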
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/387/129387462.ipynb
pytorchtabnet
ryati131457
[{"Id": 129387462, "ScriptId": 38471654, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9468685, "CreationDate": "05/13/2023 11:05:29", "VersionNumber": 1.0, "Title": "LGBM Classifier - ICR", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 72.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 72.0, "LinesInsertedFromFork": 0.0, "LinesDeletedFromFork": 26.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 72.0, "TotalVotes": 8}]
[{"Id": 185388285, "KernelVersionId": 129387462, "SourceDatasetVersionId": 5177068}]
[{"Id": 5177068, "DatasetId": 921302, "DatasourceVersionId": 5249101, "CreatorUserId": 131457, "LicenseName": "Unknown", "CreationDate": "03/16/2023 11:00:18", "VersionNumber": 20.0, "Title": "pytorch-tabnet", "Slug": "pytorchtabnet", "Subtitle": "pytorch tabnet implementation. includes version 2.0.1", "Description": "# Pytorch-Tabnet\nCheck out the project here:", "VersionNotes": "new version", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 921302, "CreatorUserId": 131457, "OwnerUserId": 131457.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6182088.0, "CurrentDatasourceVersionId": 6261224.0, "ForumId": 937178, "Type": 2, "CreationDate": "10/14/2020 15:55:43", "LastActivityDate": "10/14/2020", "TotalViews": 8593, "TotalDownloads": 1269, "TotalVotes": 58, "TotalKernels": 194}]
[{"Id": 131457, "UserName": "ryati131457", "DisplayName": "ryati", "RegisterDate": "10/06/2013", "PerformanceTier": 2}]
false
4
844
8
879
844
129387265
<jupyter_start><jupyter_text>Drug Addiction in Bangladesh - Reasons: SMOTE Kaggle dataset identifier: drug-addiction-in-bangladesh-reasons-smote <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/suicidedata-cleaned-binaryrslt/4. suicidedata_cleaned_binaryRslt.csv" ) # Libraries from sklearn.model_selection import train_test_split import keras # padding from tensorflow.keras.preprocessing.sequence import pad_sequences import numpy as np from keras.preprocessing.text import Tokenizer import tensorflow as tf from sklearn.preprocessing import LabelEncoder # data preparation # df = df.dropna(subset=['text']) df["text"] = df["text"].astype(str) # ============================================================================= # Tokenization # ============================================================================= tokenizer = Tokenizer() tokenizer.fit_on_texts(df["text"]) sequences = tokenizer.texts_to_sequences(df["text"]) print(sequences[0]) # find the max length of these sequences max_len = max(len(item) for item in sequences) print(max_len) # word frequency DataFrame word_freq_df = ( pd.DataFrame(tokenizer.word_counts.items(), columns=["words", "count"]) .sort_values(by="count", ascending=False) .reset_index(drop=True) ) # ============================================================================= # # LabelEncoding(categorical to numerical) # ============================================================================= # # Convert the 'suicide_ideation' column to numpy array output_labeled = np.array(df["suicide_ideation"]) # Padding Data to the highest Length of Text # ============================================================================= # Padding Sequences # ============================================================================= # Pad sequences to a fixed length max_length = max_len # Specify the desired sequence length padded_sequences = pad_sequences(sequences, maxlen=max_length, padding="pre") # Splitting Data into Training Data, Test Data, Validation Data # ============================================================================= # # Splitting Data # ============================================================================= # Splitting the data into training and test sets train_df, test_df = train_test_split( range(len(padded_sequences)), test_size=0.2, random_state=42 ) # Splitting the training data further into training and validation sets train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=42) # for NumPy array # train_df.dtype # for list type(train_df) train_df = np.array(train_df) test_df = np.array(test_df) val_df = np.array(val_df) # Split the output_labeled array according to the train, test, and validation indices train_output = output_labeled[train_df] test_output = output_labeled[test_df] val_output = output_labeled[val_df] # Index the padded sequences the same way, so the models are fed token sequences rather than the raw index arrays X_train_pad = padded_sequences[train_df] X_test_pad = padded_sequences[test_df] X_val_pad = padded_sequences[val_df] print("done") # GloVe Embedding # glove_embedding={} import pickle with open( "/kaggle/input/pickled-glove840b300d-for-10sec-loading/glove.840B.300d.pkl", "rb" ) as fp: glove_embedding = pickle.load(fp) voc_size = len(tokenizer.word_index) # create an empty embedding matrix embedding_matrix = np.zeros((voc_size + 1, 300), dtype=float) # Fill the matrix with the pretrained model vocab for word, idx in tokenizer.word_index.items(): embedding_vector = glove_embedding.get(word) if embedding_vector is not None: embedding_matrix[idx] = embedding_vector # imports needed below, placed here so the tuning loop can run top to bottom from keras.callbacks import EarlyStopping, ReduceLROnPlateau from keras.models import Sequential from keras.layers import Embedding, Dense, LSTM, GlobalMaxPooling1D, Input # Early Stop & Learning Rate patience_values = [3, 5, 7] # Example range of patience values to test best_performance = 0 # Track the best performance achieved best_params = None # Track the best parameter combination for early_stop_patience in patience_values: for reducelr_patience in patience_values: # Create the Early Stopping and Learning Rate Reduction callbacks with the current patience values early_stop = EarlyStopping(patience=early_stop_patience) reducelr = ReduceLROnPlateau(patience=reducelr_patience) # Train your model with these callbacks model = Sequential() model.add(Input(shape=(max_length,))) model.add( Embedding(voc_size + 1, 300, weights=[embedding_matrix], trainable=False) ) model.add(LSTM(20, return_sequences=True)) model.add(GlobalMaxPooling1D()) # model.add(Dropout(0.3)) model.add(Dense(256, activation="relu")) # model.add(Dropout(0.2)) model.add(Dense(1, activation="sigmoid")) model.compile( optimizer=keras.optimizers.SGD(0.1, momentum=0.09), loss="binary_crossentropy", metrics=["accuracy"], ) # Evaluate the model's performance on the validation set r = model.fit( X_train_pad, train_output, validation_data=(X_test_pad, test_output), epochs=20, batch_size=256, callbacks=[early_stop, reducelr], ) # Use the best validation accuracy from this run as the performance score performance = max(r.history["val_accuracy"]) # Compare the performance and update the best parameters if necessary if performance > best_performance: best_performance = performance best_params = (early_stop_patience, reducelr_patience) print("Best parameter combination:", best_params) print("Best performance:", best_performance) from keras.callbacks import EarlyStopping, ReduceLROnPlateau from keras.models import Sequential from keras.layers import ( Embedding, Dense, LSTM, Bidirectional, GlobalMaxPooling1D, Input, Dropout, ) early_stop = EarlyStopping(patience=5) reducelr = ReduceLROnPlateau(patience=3) model = Sequential() model.add(Input(shape=(max_length,))) model.add(Embedding(voc_size + 1, 300, weights=[embedding_matrix], trainable=False)) model.add(LSTM(20, return_sequences=True)) model.add(GlobalMaxPooling1D()) # model.add(Dropout(0.3)) model.add(Dense(256, activation="relu")) # model.add(Dropout(0.2)) model.add(Dense(1, activation="sigmoid")) model.compile( optimizer=keras.optimizers.SGD(0.1, momentum=0.09), loss="binary_crossentropy", metrics=["accuracy"], ) # Evaluate the model's performance on the validation set r = model.fit( X_train_pad, train_output, validation_data=(X_test_pad, test_output), epochs=20, batch_size=256, callbacks=[early_stop, reducelr], )
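# Editorial note: the validation split (val_df / val_output) built earlier is never used in this script. A short sketch, assuming the final model, padded_sequences, val_df and val_output defined above are in scope, of how it could serve as a final hold-out check:
# Hold-out sequences for the validation indices (same indexing as the train/test splits)
X_val_pad = padded_sequences[val_df]

# Evaluate the trained model on the untouched validation split
val_loss, val_acc = model.evaluate(X_val_pad, val_output, verbose=0)
print("Hold-out validation loss:", val_loss)
print("Hold-out validation accuracy:", val_acc)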
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/387/129387265.ipynb
drug-addiction-in-bangladesh-reasons-smote
protikmostafa
[{"Id": 129387265, "ScriptId": 38180316, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14894065, "CreationDate": "05/13/2023 11:03:06", "VersionNumber": 2.0, "Title": "Tonmoy", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 176.0, "LinesInsertedFromPrevious": 148.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 28.0, "LinesInsertedFromFork": 145.0, "LinesDeletedFromFork": 142.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 31.0, "TotalVotes": 0}]
[{"Id": 185387929, "KernelVersionId": 129387265, "SourceDatasetVersionId": 2088113}, {"Id": 185387927, "KernelVersionId": 129387265, "SourceDatasetVersionId": 1964749}, {"Id": 185387926, "KernelVersionId": 129387265, "SourceDatasetVersionId": 1338480}, {"Id": 185387925, "KernelVersionId": 129387265, "SourceDatasetVersionId": 380554}]
[{"Id": 2088113, "DatasetId": 1251960, "DatasourceVersionId": 2128536, "CreatorUserId": 3955584, "LicenseName": "Unknown", "CreationDate": "04/05/2021 08:19:02", "VersionNumber": 1.0, "Title": "Drug Addiction in Bangladesh - Reasons: SMOTE", "Slug": "drug-addiction-in-bangladesh-reasons-smote", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1251960, "CreatorUserId": 3955584, "OwnerUserId": 3955584.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2088113.0, "CurrentDatasourceVersionId": 2128536.0, "ForumId": 1270305, "Type": 2, "CreationDate": "04/05/2021 08:19:02", "LastActivityDate": "04/05/2021", "TotalViews": 2717, "TotalDownloads": 235, "TotalVotes": 12, "TotalKernels": 2}]
[{"Id": 3955584, "UserName": "protikmostafa", "DisplayName": "Mostafa Mohiuddin Jalal Protik", "RegisterDate": "10/31/2019", "PerformanceTier": 1}]
false
1
1,823
0
1,870
1,823
129387428
<jupyter_start><jupyter_text>Hotel booking demand ### Context Have you ever wondered when the best time of year to book a hotel room is? Or the optimal length of stay in order to get the best daily rate? What if you wanted to predict whether or not a hotel was likely to receive a disproportionately high number of special requests? This hotel booking dataset can help you explore those questions! ### Content This data set contains booking information for a city hotel and a resort hotel, and includes information such as when the booking was made, length of stay, the number of adults, children, and/or babies, and the number of available parking spaces, among other things. All personally identifying information has been removed from the data. Kaggle dataset identifier: hotel-booking-demand <jupyter_script># # **Project Name - Hotel Booking EDA** # ##### **Project Type** - EDA # ##### **Contribution** - Individual # ##### **Contributor** - Manash Jyoti Borah # # **Problem Statement** # This hotel booking dataset can help us explore many questions regarding cancelations of bookings, how to increase revenue and improve efficiency, which is the busiest period in hotel industry etc. This data set contains booking information for a city hotel and a resort hotel, and includes information such as when the booking was made, length of stay, the number of adults, children, and/or babies, and the number of available parking spaces, among other things. All personally identifying information has been removed from the data. We will explore and analyse the data to discover important factors and draw key insights that govern the bookings. # #### **Business Objective** # ***Main objective is to identify key trends and patterns in the data that can be used to improve business strategies in the hotel industry*** # Understanding the travel patterns and preferences of their guests is important for hotels in order to make informed decisions about things like peak seasons, common reasons for cancellations, demand for amenities such as parking, and the number of children staying at the property. For many people, traveling is a thrilling and meaningful experience. # Importing all the required python modules and libraries import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime as dt, date import plotly.express as px import warnings warnings.filterwarnings("ignore") # upgrading matplotlib to the latest version so that we can use the latest features # Loading the dataset path = "/kaggle/input/hotel-booking-demand/hotel_bookings.csv" booking_df = pd.read_csv(path) # Checking the first 5 rows of the dataset booking_df.head() # * EDA involves investigating the data and making some useful insights out of it. # * We can explore the data and analyse it using different plots obtained with the use of libraries like *matplotlib* and *seaborn*. # * In the **non-graphical approach**, we will be using functions such as shape, summary, describe, isnull, info, datatypes and more. # * In the **graphical approach**, we will be using plots such as pie, scatter, box, bar, density and correlation plots. # ### 1. Basic information about the dataset: # basic information of the dataset booking_df.info() # shape of the dataframe booking_df.shape # # * The dataset has 119390 rows and 32 columns. # * Some of the columns need conversion of datatypes. # * Further, we will add new columns to extract insights from the dataset. 
# # brief description of the numerical columns of the dataset booking_df.describe() # brief description of the categorical columns of the dataset booking_df.describe(include="object") # creating a copy of the dataframe so that original one remains intact without any alteration booking_df_c = booking_df.copy() # ### 2. Checking for null values: # checking if any of the columns has null values, and if found, sorting them in descending order of number of null values booking_df_c.isnull().sum().sort_values(ascending=False) # * From the above data, it can be seen that most of the hotels listed don't # belong to any company ie. they are mostly privately run. Most of the people preferred booking on their own than opting for any agent. Hence, there are so many **null** values in columns namely "company" and "agent". \\ # # * In few of the cases, country name was not inserted leading to **null** values in those entries. So, we will put "**Others**" in place of all the null values in "**country**" column. # * In very few cases, the number of children accompanying the adults are missing and hence, we will fill those entries with the median of the entire "**children**" column. # ### 3. Removing duplicated rows present if any: # checking if the dataset has any duplicated rows sum(booking_df_c.duplicated()) # dropping the duplicate rows booking_df_c.drop_duplicates(inplace=True) # dataframe info after dropping the duplicate rows booking_df_c.info() # After removing the duplicated rows, we have 87396 rows. # Since, the company column has too many **null** values, we can ***drop*** this one to make our analysis more efficient. booking_df_c.drop(columns=["company"], axis=1, inplace=True) # We will ***replace*** all the **null** values of "agent" column with 0. booking_df_c["agent"].fillna(0, inplace=True) # checking for null values in the "country" column sum(booking_df_c["country"].isna()) # Since, many of the country entries are missing, we will fill those values with "Others". booking_df_c["country"].fillna("Others", inplace=True) # Four entries of the children column have **null** values. So, we will fill those entries with value = 0. booking_df_c["children"].fillna(0, inplace=True) # There are a few columns with outliers. So, we would like to remove them before proceeding to actual EDA. booking_df_c = booking_df_c.loc[ (booking_df_c["adr"] < 5000) & (booking_df_c["babies"] < 5) ] # ### 4. Getting an insight of all the unique values of each column of the dataset: pd.Series({col: booking_df_c[col].unique() for col in booking_df_c}) # The columns of the dataframe and the data they represent are listed below: # 1. **hotel** : Name of the hotel namely - Resort Hotel and City Hotel # 2. **is_canceled** : If the booking was canceled (1) or not (0) # 3. **lead_time** : Number of days before the actual arrival of the guests # 4. **arrival_date_year** : Year of arrival date # 5. **arrival_date_month** : Month of arrival date # 6. **arrival_date_week_number** : Week number of the year for arrival date # 7. **arrival_date_day_of_month** : Day of arrival date # 8. **stays_in_weekend_nights** : Number of weekend nights (Saturday or Sunday) spent at the hotel by the guests. # 9. **stays_in_week_nights** : Number of weeknights (Monday to Friday) spent at the hotel by the guests. # 10. **adults** : Number of adults among the guests # 11. **children** : Number of children accompanying the adults # 12. **babies** : Number of babies accompanying the adults # 13. **meal** : Type of meal booked # 14. 
**country** : Country of origin of the guests # 15. **market_segment** : Designation of market segment # 16. **distribution_channel** : Name of booking distribution channel # 17. **is_repeated_guest** : If the booking was from a repeated guest (1) or not (0) # 18. **previous_cancellations** : Number of previous bookings that were cancelled by the customer prior to the current booking # 19. **previous_bookings_not_canceled** : Number of previous bookings not cancelled by the customer prior to the current booking # 20. **reserved_room_type** : Code of room type reserved # 21. **assigned_room_type** : Code of room type assigned # 22. **booking_changes** : Number of changes made to the booking # 23. **deposit_type** : Type of the deposit made by the guest # 24. **agent** : ID of travel agent who made the booking # 25. **company** : ID of the company that made the booking # 26. **days_in_waiting_list** : Number of days the booking was in the waiting list # 27. **customer_type** : Type of customer, assuming one of four categories # 28. **adr** : Average Daily Rate, as defined by dividing the sum of all lodging transactions by the total number of staying nights # 29. **required_car_parking_spaces** : Number of car parking spaces required by the customer # 30. **total_of_special_requests** : Number of special requests made by the customer # 31. **reservation_status** : Reservation status (Cancelled, Check-Out or No-Show) # 32. **reservation_status_date** : Date at which the last reservation status was updated # It can be observed from the unique values of columns namely "adults", "children" and "babies" that there are entries with value = 0. If the number of adults = 0, then we need to remove those rows as "children" and "babies" can't book a hotel without being accompanied by an adult. filter = ( (booking_df_c["children"] == 0) & (booking_df_c["adults"] == 0) & (booking_df_c["babies"] == 0) ) booking_df_c[filter] booking_df_c.drop(booking_df_c[filter].index, inplace=True) # renaming the values in the is_canceled column to appropriate names to ease the analysis booking_df_c["is_canceled"] = np.where( booking_df_c["is_canceled"] == 0, "not_canceled", "canceled" ) booking_df_c.shape booking_df_c.isnull().sum().sort_index(ascending=False).head() # As we don't have any null values, we can proceed to change the datatypes of the columns wherever required. # ### 5. Converting columns of the dataframe to appropriate datatypes: # converting datatype of columns 'children' and 'agent' from float to int. booking_df_c[["children", "agent"]] = booking_df_c[["children", "agent"]].astype( "int64" ) # converting datatype of column 'reservation_status_date' from str to datetime. booking_df_c["reservation_status_date"] = pd.to_datetime( booking_df_c["reservation_status_date"], format="%Y-%m-%d" ) # ### 6. Adding new columns: # Adding length_of_stay in the hotel as the sum of stays in weekend_nights and week_nights booking_df_c["length_of_stay"] = ( booking_df_c["stays_in_weekend_nights"] + booking_df_c["stays_in_week_nights"] ) # adding revenue which is equal to adr*length_of_stay booking_df_c["revenue"] = [ los * adr if los > 0 else adr for los, adr in zip(booking_df_c["length_of_stay"], booking_df_c["adr"]) ] # adding guest in total booking_df_c["guest_in_total"] = ( booking_df_c["adults"] + booking_df_c["children"] + booking_df_c["babies"] ) # ## **Exploratory Data Analysis** # ### **Q. 
What is the percentage of bookings in each hotel?** # Since there are two types of hotels - Resort Hotel and City Hotel - which is evident from the series depicting unique values of each column, we can use a *pie chart* or a *bar plot* to display the data. # setting the background sns.set_style("ticks") # setting the color palette sns.set_palette(sns.color_palette("rainbow")) # changing the default context sns.set_context("talk") fig, ax = plt.subplots(1, 2, figsize=(20, 8)) # plotting the pie plot data = np.array(booking_df_c["hotel"].value_counts().to_list()) labels = np.array(booking_df_c["hotel"].value_counts().index.to_list()) ax[0].pie( data, labels=labels, autopct="%0.2f%%", explode=[0, 0.05], startangle=90, textprops={"fontsize": 18}, shadow=True, ) ax[0].axis("equal") ax[0].set_title( "Pie chart showing percentage of booking of each type of hotel", fontsize=20 ) # plotting the bar plot side by side data_bar = ( booking_df_c["hotel"] .value_counts() .reset_index() .rename(columns={"index": "hotel", "hotel": "count"}) ) ax[1] = sns.barplot(data=data_bar, y="count", x="hotel") ax[1].set_title( "Bar plot showing number of bookings of hotels for each type", fontsize=20 ) ax[1].bar_label(ax[1].containers[0], fontsize=16) ax[1].tick_params(axis="both", labelsize=16) ax[1].set_xlabel("Hotel", fontsize=18) ax[1].set_ylabel("Count of hotels", fontsize=18) plt.tight_layout() plt.show() # # * People prefer City Hotels over Resort Hotels, as they might be cheaper compared to the latter. # * The number of bookings for the City Hotel is almost 1.57 times that of the Resort Hotel. # Let us have a look at the booking statistics for each hotel throughout the timeline **2015-2017** data_2015 = booking_df_c[booking_df_c["arrival_date_year"] == 2015] data_2016 = booking_df_c[booking_df_c["arrival_date_year"] == 2016] data_2017 = booking_df_c[booking_df_c["arrival_date_year"] == 2017] ordered_month = [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", ] fig, ax = plt.subplots(3, 1, sharex=True, figsize=(16, 20)) data = [data_2015, data_2016, data_2017] years = [2015, 2016, 2017] for i, year in enumerate(data): ax[i].set_title(f"{years[i]}", fontsize=16) sns.countplot( data=year, y="arrival_date_month", hue="hotel", order=ordered_month, ax=ax[i] ) ax[i].set_ylabel("Month", fontsize=16) ax[i].set_xlabel("Number of bookings", fontsize=16) plt.title(f"{years[i]}", fontsize=18) plt.suptitle("Number of bookings in each year:", fontsize=20, y=0.92) plt.show() # The dataset contains data from **July, 2015** to **August, 2017**. Hence, we have 3 years of data for **July** and **August**, while for all other months the dataset contains data for only 2 years. The maximum number of bookings for the Resort hotel was reported in **Aug, 2017** while that for the City hotel was reported in **May, 2017**. # ### **Q. How many bookings were canceled?** # We can split the data into two separate dataframes containing information of each hotel to ease our analysis.
resort_ht = booking_df_c[booking_df_c["hotel"] == "Resort Hotel"] city_ht = booking_df_c[booking_df_c["hotel"] == "City Hotel"] fig = plt.figure(figsize=(22, 10)) grid = plt.GridSpec(2, 4, hspace=0.2, wspace=0.1) ax1 = fig.add_subplot(grid[0:, :2]) ax2 = fig.add_subplot(grid[0, 2:]) ax3 = fig.add_subplot(grid[1, 2:]) # countplot to show booking cancelation in each type of hotel sns.countplot(x="hotel", data=booking_df_c, hue="is_canceled", ax=ax1).set_title( "Hotel Booking Cancellation", fontsize=20 ) ax1.set_xlabel("Hotel type", fontsize=16) ax1.set_ylabel("Count of bookings", fontsize=16) ax1.tick_params(axis="both", labelsize=14) ax1.bar_label(ax1.containers[0], fontsize=14) ax1.bar_label(ax1.containers[1], fontsize=14) ax1.legend(loc=0, fontsize=14) # pieplot to show booking statistics for Resort Hotel and City Hotel resort_data = resort_ht["is_canceled"].value_counts(normalize=True) city_data = city_ht["is_canceled"].value_counts(normalize=True) titles = ["Resort Hotel", "City Hotel"] data = [resort_data, city_data] labels = ["confirmed", "canceled"] explode = [0.05, 0.05] for i, ax in enumerate([ax2, ax3]): ax.pie( data[i], labels=labels, explode=explode, autopct="%0.2f%%", startangle=45, textprops={"fontsize": 16}, shadow=True, ) ax.set_title(f"Booking statistics for {titles[i]}", fontsize=20) # From the above analysis, it has been found that the number of guests booking for the City Hotel is more, likewise the percentage of cancellation sees an analogous trend of being **higher** as compared to that of Resort Hotel. The reasons for cancellation in both the hotels have to be figured out in the analysis later. data_2015 = booking_df_c[booking_df_c["arrival_date_year"] == 2015] data_2016 = booking_df_c[booking_df_c["arrival_date_year"] == 2016] data_2017 = booking_df_c[booking_df_c["arrival_date_year"] == 2017] ordered_month = [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", ] fig, ax = plt.subplots(3, 1, sharex=True, figsize=(16, 20)) data = [data_2015, data_2016, data_2017] years = [2015, 2016, 2017] for i, year in enumerate(data): ax[i].set_title(f"{years[i]}", fontsize=16) sns.countplot( data=year, y="arrival_date_month", hue="is_canceled", hue_order=["not_canceled", "canceled"], order=ordered_month, ax=ax[i], ) ax[i].set_ylabel("Month", fontsize=16) ax[i].set_xlabel("booking statistics", fontsize=16) plt.title(f"{years[i]}", fontsize=18) plt.suptitle("Number of cancellations in each year:", fontsize=20, y=0.92) plt.show() # Maximum number of cancellations were seen during summer. The number of cancellations increases with increase in the number of bookings. # ### **Q. How long people stay in each hotel?** # Now, let us make a seperate dataframe for all the valid hotel bookings (ie. bookings which were not canceled). 
confirmed_bookings = booking_df_c[booking_df_c["is_canceled"] == "not_canceled"] # plotting two subplots to show the length-of-stay for each type of hotel fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 14)) # countplot of LOS fig1 = sns.countplot(x="length_of_stay", hue="hotel", data=confirmed_bookings, ax=ax[0]) fig1.set_title( "Countplot showing length-of-stay in each type of hotel in days", fontsize=18 ) fig1.set_ylabel("Count of hotels", fontsize=16) fig1.legend(fontsize=14, loc=1) # boxplot of LOS to find outliers present if any fig2 = sns.boxplot(x="length_of_stay", y="hotel", data=confirmed_bookings, ax=ax[1]) fig2.set_title( "Boxplot showing length-of-stay in each type of hotel in days", fontsize=18 ) fig2.set_ylabel("Hotel type", fontsize=16) for axis in ax.flatten(): plt.sca(axis) plt.xlabel("Length of stay", fontsize=16) plt.tick_params(axis="both", labelsize=14) plt.tight_layout() plt.show() # From the above plots, it can be seen that people usually stay for less than 7 days in either hotel type. However, there are a few occasions where the length-of-stay exceeds 15 days and reaches up to 69 days. Hence, for ease of analysis, we will not consider the outliers and will consider the length-of-stay up to 14 days only. fig = plt.figure(figsize=(24, 12)) grid = plt.GridSpec(2, 4, hspace=0.2, wspace=0.3) ax1 = fig.add_subplot(grid[0:, :2]) ax2 = fig.add_subplot(grid[0, 2:]) ax3 = fig.add_subplot(grid[1, 2:], sharex=ax2, sharey=ax2) data = confirmed_bookings[confirmed_bookings["length_of_stay"] < 15] # countplot to show LOS in each type of hotel fig1 = sns.countplot(x="length_of_stay", hue="hotel", data=data, ax=ax1) fig1.set_title( "Countplot showing length-of-stay in each type of hotel in days", fontsize=18 ) fig1.set_xlabel("Length of stay", fontsize=16) # countplot to show LOS in each type of hotel in weekend nights fig2 = sns.countplot( x="stays_in_weekend_nights", hue="hotel", data=confirmed_bookings, ax=ax2 ) fig2.set_title( "Countplot showing stays-in-weekend-nights in each type of hotel in days", fontsize=18, ) fig2.set_xlabel("Stays in weekend nights", fontsize=16) # countplot to show LOS in each type of hotel in week nights fig3 = sns.countplot( x="stays_in_week_nights", hue="hotel", data=confirmed_bookings, ax=ax3 ) fig3.set_title( "Countplot showing stays-in-week-nights in each type of hotel in days", fontsize=18 ) fig3.set_xlabel("Stays in week nights", fontsize=16) for axis in (ax1, ax2, ax3): plt.sca(axis) plt.ylabel("Count of hotels", fontsize=16) plt.tick_params(axis="both", labelsize=14) plt.legend(loc="upper right", fontsize=14) # Most guests spend less than a week in either type of hotel. For short stays, people tend to choose the City Hotel, while for longer stays they prefer the Resort Hotel. The number of nights spent on weekdays is higher than the number spent on weekends, irrespective of the hotel type. # ### **Q. How important is it to have a parking space?** # Let us have an overview of the importance of having a parking space in each hotel.
fig, ax = plt.subplots(1, 2, figsize=(20, 8)) explode = (0.05, 0.05) pie_city = city_ht["required_car_parking_spaces"].value_counts(normalize=True) pie_resort = resort_ht["required_car_parking_spaces"].value_counts(normalize=True) titles = ["City Hotel", "Resort Hotel"] for i, pie in enumerate([pie_city, pie_resort]): ax[i].pie( pie[pie > 0.01], labels=["Parking not required", "Parking required"], autopct="%1.1f%%", explode=explode, startangle=45, textprops={"fontsize": 16}, shadow=True, ) ax[i].set_title(f"Parking Spaces Required - {titles[i]}", fontsize=20) plt.show() # * The pie charts show that a majority of customers in both the City and Resort hotels do not need a parking space. However, the proportion of customers who require a parking space is higher in the Resort hotel compared to the City hotel, reaching **15.9%**. # * This highlights the importance of providing **at least 1** parking space per reservation in resort hotels to meet the demands of their customers. # ### **Q. Which is the most preferred meal type?** # Checking the different types of meals served at each hotel: confirmed_bookings.groupby("hotel")["meal"].value_counts() fig, ax = plt.subplots(1, 2, figsize=(16, 8)) pie_city = city_ht["meal"].value_counts() pie_resort = resort_ht["meal"].value_counts() titles = ["City Hotel", "Resort Hotel"] label1 = ["Bed & Breakfast", "Self Catering", "Half Board", "Full Board"] label2 = ["Bed & Breakfast", "Half Board", "Undefined", "Full Board", "Self Catering"] labels = [label1, label2] explode1 = [0.05, 0.05, 0.05, 0.05] explode2 = [0.05, 0.05, 0.07, 1, 1.5] explode = [explode1, explode2] for i, pie in enumerate([pie_city, pie_resort]): ax[i].pie( pie, labels=labels[i], autopct="%1.2f%%", explode=explode[i], shadow=True, textprops={"fontsize": 18}, ) ax[i].set_title(f"Meals catered at the {titles[i]}", fontsize=20) # plt.tight_layout() plt.show() # * The **Bed and Breakfast** (BB) plan is the most popular meal plan in both hotels, with the City Hotel serving 77.26% and the Resort Hotel serving 78.77% of the total meals under this plan. This suggests that guests at both hotels prefer a flexible option that allows them to dine outside of the hotel. # * The **Half Board** (HB) plan is more popular at the Resort Hotel compared to the City Hotel. This could indicate that guests at the Resort Hotel are more interested in dining at the hotel compared to those at the City Hotel. # * The very low number of meals served under the **FB** plan at both hotels suggests that this may not be a popular option for guests. # * The low number of meals served under the **SC** plan at both hotels suggests that guests are not interested in this option. The business may want to re-evaluate this offering and determine if it is worth continuing to offer. # * There is an **Undefined** meal plan at the Resort Hotel with 410 meals served, suggesting confusion or miscommunication regarding meal options. The business should investigate this further and determine what changes can be made to ensure a clear understanding of the meal options for guests. # ### **Q. Does higher lead time lead to booking cancellations?** plt.figure(figsize=(12, 8)) g = ( sns.FacetGrid(booking_df_c, hue="is_canceled", height=6, aspect=2) .map(sns.kdeplot, "lead_time", shade=True) .add_legend() ) # When the lead time exceeds around **50** days, the cancellation rate among guests increases. To meet the needs of customers, it is crucial to provide accurate, real-time information and offer flexible booking options that allow for easy modifications.
This will help to reduce the risk of cancellations and enhance customer satisfaction. cancel_lead_time = ( booking_df_c[booking_df_c["is_canceled"] == "canceled"] .groupby("arrival_date_month")["lead_time"] .median() .reindex(ordered_month) ) cancel_count = ( booking_df_c[booking_df_c["is_canceled"] == "canceled"] .groupby("arrival_date_month") .size() .reindex(ordered_month) ) fig, ax = plt.subplots(figsize=(12, 6)) ax = cancel_lead_time.plot(kind="line", marker="o") ax.set_ylabel("Median Lead Time") ax.legend(["Median Lead Time"], loc="upper left") ax1 = ax.twinx() ax1 = cancel_count.plot(kind="line", marker="v", color="green") ax1.set_ylabel("Canceled bookings count") ax1.legend(["Cancellations count"], loc="upper right") plt.title("Median Lead Time and canceled bookings count per month") # The data shows that as **lead time** increases, the number of cancellations in hotel bookings also tends to increase. While there may be various reasons for cancellations, such as changes in travel plans or unforeseen circumstances, hotels should be aware that guests may be more likely to cancel their reservations the further out they are made. # ### **Q. Which is the most sought room?** # value counts of each room type confirmed_bookings["assigned_room_type"].value_counts() # Number of bookings according to room type fig, ax = plt.subplots(1, 2, figsize=(20, 8)) ordered_rooms = np.sort(confirmed_bookings["assigned_room_type"].unique()) fig1 = sns.countplot( x="reserved_room_type", hue="hotel", data=confirmed_bookings, order=ordered_rooms, ax=ax[0], ) fig1.set_xlabel("Room type reserved") fig1.set_ylabel("Number of Bookings") fig1.set_title("Number of bookings according to room type", fontsize=20) fig1.legend(loc=1) fig2 = sns.boxplot( x="reserved_room_type", y="adr", hue="hotel", data=confirmed_bookings, order=ordered_rooms, ax=ax[1], ) fig2.set_xlabel("Room type reserved") fig2.set_title("ADR according to room type for each hotel", fontsize=20) fig2.legend(loc=1) plt.tight_layout() plt.show() # * The room type with the most number of bookings is **A** followed by **D**. In case of Resort hotel, room type **A** and **D** requiress lower *ADR*. This could be one of the contributing factors to choose these 2 rooms over others. Even though the median average daily rate (*ADR*) for these rooms is not the lowest, guests at the City hotel tend to prefer them. # * Other types of room have also reservaions but number is low as compared to the former ones. The reason for that could be because of the high ADR these rooms cost. Rooms of type **I** and **K** have very few bookings. The room type **L** has only 1 booking that too got cancelled. # We will need to figure out which room type suits which category of people as the guests visting the hotels belong to either adults or adults with children and babies or children only.. 
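# A compact cross-tab of the same breakdown (a minimal sketch; the bucket edges below are an
# assumption chosen to line up with the per-category plots that follow):
adult_bucket = pd.cut(
    confirmed_bookings["adults"],
    bins=[-1, 0, 1, 2, 3, np.inf],
    labels=["children only", "1 adult", "2 adults", "3 adults", "4+ adults"],
)
room_by_party = pd.crosstab(adult_bucket, confirmed_bookings["reserved_room_type"])
print(room_by_party)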
# dataframe with only 1 adult
adult_count1 = confirmed_bookings[confirmed_bookings["adults"] == 1][
    "reserved_room_type"
].value_counts()
# dataframe with 2 adults
adult_count2 = confirmed_bookings[confirmed_bookings["adults"] == 2][
    "reserved_room_type"
].value_counts()
# dataframe with 3 adults
adult_count3 = confirmed_bookings[confirmed_bookings["adults"] == 3][
    "reserved_room_type"
].value_counts()
# dataframe with equal to or greater than 4 adults
adult_count4 = confirmed_bookings[confirmed_bookings["adults"] >= 4][
    "reserved_room_type"
].value_counts()
# dataframe with only children
child_count = confirmed_bookings[confirmed_bookings["adults"] == 0][
    "reserved_room_type"
].value_counts()
# dataframe with all the guests taken together
total_count = confirmed_bookings["reserved_room_type"].value_counts()
data = [
    adult_count1,
    adult_count2,
    adult_count3,
    adult_count4,
    child_count,
    total_count,
]
titles = [
    "1 adult",
    "2 adults",
    "3 adults",
    "4 or more adults",
    "only children",
    "all the guests",
]
fig, ax = plt.subplots(2, 3, figsize=(20, 12))
ax = ax.flatten()
# each subplot is a vertical bar chart of value_counts(): room type on the x-axis, booking count on the y-axis
for i in range(len(data)):
    data[i].plot(kind="bar", ax=ax[i])
    ax[i].set_title(f"Bookings having {titles[i]}", fontsize=18)
    ax[i].set_xlabel("Room type reserved")
    ax[i].set_ylabel("Number of bookings")
plt.tight_layout()
plt.show()

# * Most of the guests checking in are single adults or couples. The order of preference of room types remains the same for bookings with 1 or 2 adults: the most preferred room type is **A**, followed by **D**, **E**, **F** and **G**.
# * Bookings with 3 adults mostly prefer room **D**, followed by **A**, **E**, **H**, **G** and **F**.
# * There are a few bookings with 4 or more adults. Their most preferred room is **G**, while some of them also booked **H**, **D**, **C**, **F**, **E**, **L** and **A**, in that order of preference.
# * A few bookings are recorded with children or babies but no accompanying adults. For such children-only bookings, the order of preference is **B**, **A**, **F**, **E** and **D**.
# * The largest number of bookings were made for 2 adults. Across all bookings, the most preferred room was **A**, followed by **D**, **E**, **F** and **G**.
# ### **Q. Which is the busiest month?**
ordered_month = np.array(
    [
        "January",
        "February",
        "March",
        "April",
        "May",
        "June",
        "July",
        "August",
        "September",
        "October",
        "November",
        "December",
    ]
)
data = (
    confirmed_bookings.groupby("arrival_date_month")["hotel"]
    .value_counts()
    .unstack()
    .reindex(ordered_month)
    .reset_index()
    .rename_axis(None, axis=1)
)
# plotting the total bookings by month and bookings by month for each hotel
fig, ax = plt.subplots(1, 2, figsize=(18, 8))
data.plot(x="arrival_date_month", kind="bar", stacked=True, ax=ax[0], fontsize=16)
ax[0].set_title("Total bookings by month", fontsize=20)
data.plot(x="arrival_date_month", kind="bar", ax=ax[1], fontsize=16)
ax[1].set_title("Bookings by Month for each hotel", fontsize=20)
for axis in ax.flatten():
    plt.sca(axis)
    plt.xlabel("Month of arrival", fontsize=18)
    plt.ylabel("Count of hotels", fontsize=18)
    plt.xticks(fontsize=16, rotation=45)
    plt.yticks(fontsize=16)
    plt.legend(fontsize=16)
plt.tight_layout()
plt.show()

# Both these hotels enjoy a good number of guests during the spring and summer seasons. However, this number decreases once autumn kicks in and keeps falling into winter. ADR might be an important factor in attracting guests across seasons, which we will discuss later.
# ### **Q. Which hotel has a higher ADR (price per night), and which one makes more revenue?**
fig, ax = plt.subplots(1, 2, figsize=(20, 8))
data = (
    confirmed_bookings.groupby(["arrival_date_month", "hotel"])
    .agg({"adr": np.mean, "revenue": np.sum})
    .reset_index()
)
data["arrival_date_month"] = pd.Categorical(
    data["arrival_date_month"], categories=ordered_month, ordered=True
)
fig1 = sns.lineplot(
    data=data,
    x="arrival_date_month",
    y="adr",
    hue="hotel",
    linewidth=4,
    marker="o",
    ax=ax[0],
    markersize=14,
)
ax[0].set_title("ADR and Number of bookings per month", fontsize=20)
ax[0].set_ylabel("Average Daily Rate (ADR)", fontsize=18)
ax[0].legend(loc="upper left", fontsize=16)
ax1 = ax[0].twinx()
# plotting a bar plot showing the total bookings per month
data1 = (
    confirmed_bookings.groupby("arrival_date_month")["hotel"]
    .value_counts()
    .unstack()
    .reindex(ordered_month)
    .reset_index()
    .rename_axis(None, axis=1)
)
data1.plot(
    x="arrival_date_month",
    kind="bar",
    cmap="viridis",
    ylim=[0, 8000],
    ax=ax1,
    fontsize=16,
    alpha=0.5,
)
ax1.set_ylabel("Number of bookings", fontsize=18)
ax1.legend(loc="upper right", fontsize=16)
fig2 = sns.lineplot(
    data=data,
    x="arrival_date_month",
    y="revenue",
    hue="hotel",
    linewidth=4,
    marker="o",
    ax=ax[1],
    markersize=14,
)
ax[1].set_title("Gross Revenue Per Month", fontsize=20)
ax[1].set_ylabel("Revenue", fontsize=18)
ax[1].legend(fontsize=16)
for axis in ax.flatten():
    plt.sca(axis)
    plt.xlabel("Month", fontsize=18)
    plt.xticks(fontsize=16, rotation=45)
    plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()

# Despite the higher prices, the number of bookings increases during spring and keeps increasing into summer. During the busier seasons, ADR is on the higher side. Maximum revenue is generated during summer, when people usually go on vacation, while minimum revenue is generated during the winter season.
# Let us check whether a higher ADR leads to guests canceling their bookings:
cancel_adr = (
    booking_df_c[booking_df_c["is_canceled"] == "canceled"]
    .groupby("arrival_date_month")["adr"]
    .median()
    .reindex(ordered_month)
)
cancel_count = (
    booking_df_c[booking_df_c["is_canceled"] == "canceled"]
    .groupby("arrival_date_month")
    .size()
    .reindex(ordered_month)
)
fig, ax = plt.subplots(figsize=(12, 6))
ax = cancel_adr.plot(kind="line", marker="o")
ax.set_ylabel("Median ADR")
ax.legend(["Median ADR"], loc="upper left")
ax1 = ax.twinx()
ax1 = cancel_count.plot(kind="line", marker="v", color="green")
ax1.set_ylabel("Canceled bookings count")
ax1.legend(["Cancellations count"], loc="upper right")
plt.title("Median ADR and canceled bookings count per month")

# We can see from the above plot that as ADR increases, the number of cancellations also increases. This could be due to the fact that guests often book a hotel but, at check-in time, are reluctant to pay such a high price and look for alternative hotels.
# ### **Q. When do guests of different types visit the hotels?**
data_cust = (
    confirmed_bookings.groupby(["arrival_date_month", "customer_type"])[
        "guest_in_total"
    ]
    .sum()
    .reset_index()
)
ordered_month = [
    "January",
    "February",
    "March",
    "April",
    "May",
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
data_cust["arrival_date_month"] = pd.Categorical(
    data_cust["arrival_date_month"], categories=ordered_month, ordered=True
)
data_cust = data_cust.sort_values("arrival_date_month")
plt.figure(figsize=(16, 8))
p = sns.lineplot(
    x="arrival_date_month",
    y="guest_in_total",
    hue="customer_type",
    data=data_cust,
    marker="o",
)
plt.xticks(rotation=45)
plt.title("Guest type monthwise")
plt.xlabel("Month")
plt.ylabel("Number of guests")
plt.show()

# * There is a heightened presence of Transient-Party guests in October.
# * Transient guests experience a noticeable surge in numbers from June to August.
# * The Group type guest count remains consistently low throughout the year.
# * Contract guests show low numbers in the first half of the year, but the count surges until September and then gradually falls towards December.
# ### **Q. From where do most guests come?**
# Checking the country of origin of the guests who booked the hotels:
confirmed_bookings["country"].value_counts().head()

# Since countries are given as ISO codes, we will get their full names with the help of a Python library called "**pycountry**".
# importing pycountry to get the corresponding names of the countries against their codes
import pycountry as pc

list_alpha_2 = [i.alpha_2 for i in list(pc.countries)]
list_alpha_3 = [i.alpha_3 for i in list(pc.countries)]


def country_name(code):
    try:
        if len(code) == 2 and code in list_alpha_2:
            return pc.countries.get(alpha_2=code).name
        elif len(code) == 3 and code in list_alpha_3:
            return pc.countries.get(alpha_3=code).name
    except KeyError:
        return code
    # codes that are not valid ISO codes (e.g. the "Others" placeholder) are returned unchanged
    return code


confirmed_bookings["country_names"] = confirmed_bookings["country"].apply(country_name)
bookings_countrywise = (
    confirmed_bookings["country_names"]
    .value_counts()
    .reset_index()
    .rename(columns={"index": "Country", "country_names": "Guests in total"})
)
top_10_countries = bookings_countrywise.nlargest(10, "Guests in total")
top_10_countries_p = (
    confirmed_bookings["country_names"]
    .value_counts(normalize=True)
    .sort_values(ascending=False)[:10]
)
plt.figure(figsize=(18, 8))
plt.bar(top_10_countries["Country"], top_10_countries["Guests in total"])
plt.xticks(rotation=30)
plt.xlabel("Country")
plt.ylabel("Guests in total")
for i, p in enumerate(plt.gca().patches):
    width = p.get_width()
    height = p.get_height()
    x, y = p.get_xy()
    plt.text(
        x + width / 2,
        y + height * 1.01,
        str(round(top_10_countries_p[i] * 100, 2)) + "%",
        ha="center",
    )
plt.title("Top 10 countries by number of guests")
plt.show()

# plotting a choropleth map of guest counts by country with plotly express; folium is imported for an optional interactive map view
import folium
from folium.plugins import HeatMap

basemap = folium.Map()
guests = (
    confirmed_bookings["country"]
    .value_counts()
    .reset_index()
    .rename(columns={"index": "Country", "country": "Guests in total"})
)
guests_map = px.choropleth(
    guests,
    locations="Country",
    color="Guests in total",
    hover_data=guests.columns.to_list(),
    color_continuous_scale=px.colors.sequential.Oranges,
    hover_name="Country",
)
guests_map.show()

# From the above information, it can be observed that people from all over the globe visit these two hotels. However, most of the hotel bookings are from **Portugal**, followed by:
# * **United Kingdom**
# * **France**
# * **Spain**, and
# * **Germany**
# So, the majority of visitors are from European countries.
# ### **Q. Which agent makes the most bookings?**
# checking the agent column
confirmed_bookings["agent"].value_counts().head()

# There are some entries with agent ID = 0, which marks bookings made without the help of an agent. We exclude those entries here.
data = confirmed_bookings[~(confirmed_bookings["agent"] == 0)]["agent"].value_counts()
# plotting the barplot
plt.figure(figsize=(18, 6))
data.sort_values(ascending=False)[:10].plot(kind="bar")
plt.xlabel("Agent ID")
plt.ylabel("Number of bookings")
plt.title("Top 10 Booking agents")
plt.show()

# * The maximum number of bookings was made by the agent with ID **9**, followed by **240, 7, 14** and **250**.
# * A significant number of bookings were made without the use of any agent.
# ### **Q. Which distribution channel brings in the most bookings for each hotel?**
dist_channel = (
    confirmed_bookings.groupby(["distribution_channel", "hotel"])["guest_in_total"]
    .sum()
    .reset_index()
)
dist_channel_p = confirmed_bookings["distribution_channel"].value_counts(normalize=True)
fig, ax = plt.subplots(1, 2, figsize=(18, 8))
fig1 = sns.barplot(
    x="distribution_channel",
    y="guest_in_total",
    hue="hotel",
    data=dist_channel,
    ax=ax[0],
)
ax[0].set_title("Distribution channels with the Least and Most Bookings for Each Hotel")
ax[0].set_xlabel("Distribution Channel")
ax[0].set_ylabel("Total guests")
fig2 = dist_channel_p.plot.pie(
    ax=ax[1], explode=[0, 0.07, 0.07, 0.07, 1], autopct="%1.2f%%", shadow=True
)
ax[1].set_title("Distribution Channel")
plt.tight_layout()
plt.show()

# * The majority of bookings at the hotels come from guests in the **Online/Offline Travel Agency** distribution channel, followed by those in the Direct channel.
# * The **Corporate** and **GDS** channels contribute only a small share of the bookings for each hotel.
# ### **Q. Which market segment brings in the most bookings for each hotel?**
market_seg = (
    confirmed_bookings.groupby(["market_segment", "hotel"])["guest_in_total"]
    .sum()
    .reset_index()
)
market_seg_p = confirmed_bookings["market_segment"].value_counts(normalize=True)
fig, ax = plt.subplots(1, 2, figsize=(20, 8))
# plotting a barplot to check the strongest and the weakest market segment for each hotel
fig1 = sns.barplot(
    x="market_segment", y="guest_in_total", hue="hotel", data=market_seg, ax=ax[0]
)
ax[0].tick_params(axis="x", rotation=45)
ax[0].set_title("Market Segments with the Least and Most Bookings for Each Hotel")
ax[0].set_xlabel("Market segment")
ax[0].set_ylabel("Total guests")
# plotting a pie plot to show the overall share of each market segment
fig2 = market_seg_p.plot.pie(
    ax=ax[1], explode=[0, 0.05, 0.05, 0.05, 0.5, 1, 2], autopct="%1.2f%%", shadow=True
)
ax[1].set_title("Market Segment")
plt.tight_layout()
plt.show()
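# A numeric companion to the market-segment plots above (a minimal sketch; it only reuses columns
# already present in confirmed_bookings and the two hotel names used throughout this notebook):
segment_share = (
    pd.crosstab(
        confirmed_bookings["market_segment"],
        confirmed_bookings["hotel"],
        normalize="columns",
    )
    .mul(100)
    .round(2)
    .sort_values("City Hotel", ascending=False)
)
print(segment_share)
# the table gives, for each hotel separately, the percentage of confirmed bookings per market segment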
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/387/129387428.ipynb
hotel-booking-demand
jessemostipak
[{"Id": 129387428, "ScriptId": 38469625, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11779841, "CreationDate": "05/13/2023 11:05:00", "VersionNumber": 1.0, "Title": "notebookda6ff11dfe", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 786.0, "LinesInsertedFromPrevious": 786.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185388249, "KernelVersionId": 129387428, "SourceDatasetVersionId": 944030}]
[{"Id": 944030, "DatasetId": 511638, "DatasourceVersionId": 971732, "CreatorUserId": 4476084, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "02/13/2020 01:27:20", "VersionNumber": 1.0, "Title": "Hotel booking demand", "Slug": "hotel-booking-demand", "Subtitle": "From the paper: hotel booking demand datasets", "Description": "### Context\n\nHave you ever wondered when the best time of year to book a hotel room is? Or the optimal length of stay in order to get the best daily rate? What if you wanted to predict whether or not a hotel was likely to receive a disproportionately high number of special requests?\n\nThis hotel booking dataset can help you explore those questions!\n\n### Content\n\nThis data set contains booking information for a city hotel and a resort hotel, and includes information such as when the booking was made, length of stay, the number of adults, children, and/or babies, and the number of available parking spaces, among other things.\n\nAll personally identifying information has been removed from the data.\n\n### Acknowledgements\n\nThe data is originally from the article [**Hotel Booking Demand Datasets**](https://www.sciencedirect.com/science/article/pii/S2352340918315191), written by Nuno Antonio, Ana Almeida, and Luis Nunes for Data in Brief, Volume 22, February 2019.\n\nThe data was downloaded and cleaned by Thomas Mock and Antoine Bichat for [#TidyTuesday during the week of February 11th, 2020](https://github.com/rfordatascience/tidytuesday/blob/master/data/2020/2020-02-11/readme.md). \n\n### Inspiration\n\nThis data set is ideal for anyone looking to practice their exploratory data analysis (EDA) or get started in building predictive models!\n\nIf you're looking for inspiration on data visualizations, check out the [#TidyTuesday program](https://github.com/rfordatascience/tidytuesday), a free, weekly online event that encourages participants to create and share their [code and visualizations for a given data set on Twitter](https://twitter.com/search?q=%23TidyTuesday&src=typed_query).\n\nIf you'd like to dive into predictive modeling, [Julia Silge](https://twitter.com/juliasilge) has an [accessible and fantastic walk-through](https://juliasilge.com/blog/hotels-recipes/) which highlights the [`tidymodels`](https://www.tidyverse.org/blog/2018/08/tidymodels-0-0-1/) R package.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 511638, "CreatorUserId": 4476084, "OwnerUserId": 4476084.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 944030.0, "CurrentDatasourceVersionId": 971732.0, "ForumId": 524873, "Type": 2, "CreationDate": "02/13/2020 01:27:20", "LastActivityDate": "02/13/2020", "TotalViews": 971633, "TotalDownloads": 111900, "TotalVotes": 2217, "TotalKernels": 421}]
[{"Id": 4476084, "UserName": "jessemostipak", "DisplayName": "Jesse Mostipak", "RegisterDate": "02/11/2020", "PerformanceTier": 2}]
false
0
12,317
1
12,494
12,317
129387425
<jupyter_start><jupyter_text>fetal_brain Magnetic resonance imaging offers unrivaled visualization of the fetal brain, which forms the basis for establishing age-specific morphologic milestones. However, gauging age-appropriate neural development remains a difficult task due to the constantly changing appearance of the fetal brain, variable image quality, and frequent motion artifacts. Here we present a large diverse single-center dataset of 741 developmentally normal fetal brain MRI with their corresponding gestational ages ranging from 19 to 39 weeks, as determined by estimated delivery date based on 1st trimester US. This dataset was used to build a deep learning model that accurately and reliably predicts gestational age using attention guidance and multi-plane learning approaches. - folders 1 to 741, subdirectories for each fetal brain MRI in .jpg format, consisting of a sequence for each of the 3 planes (axial, sagittal, coronal) - 'labels.xlsx', with columns for patient ID, gestational age in days and in weeks, and axial/sagittal/coronal series numbers and middle slice file names Kaggle dataset identifier: fetal-brain <jupyter_script># import random # import cv2 # from sklearn.model_selection import train_test_split # import os # import numpy as np # # Load the dataset # image_folder_path = "/kaggle/input/fetal-brain/images" # img_size = 224 # # Iterate over the subdirectories # img_list = [] # for subdirectory in os.listdir(image_folder_path): # if os.path.isfile(os.path.join(image_folder_path, subdirectory)): # continue # # Iterate over the images in the subdirectory # for image_file in os.listdir(os.path.join(image_folder_path, subdirectory))[:5]: # if image_file.endswith(".jpg") or image_file.endswith(".jpeg") or image_file.endswith(".png"): # img_list.append(os.path.join(subdirectory, image_file)) # # Shuffle the list of images # random.shuffle(img_list) # print(len(img_list)) # # Split the list of images into training and testing sets # train_image_list, test_image_list = train_test_split(img_list, test_size=0.2) # print(len(train_image_list)) # # Create the training and testing sets # train_img = [] # train_labels = [] # for image_file in train_image_list: # train_img.append(cv2.resize(cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size))) # train_labels.append(image_file.split('_')[0]) # test_img = [] # test_labels = [] # for image_file in test_image_list: # test_img.append(cv2.resize(cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size))) # test_labels.append(image_file.split('_')[0]) # # Convert the training and testing sets to arrays # train_img = np.array(train_img) # test_img = np.array(test_img) # # Print the shapes of the training and testing sets # print(train_img.shape) # print(test_img.shape) # import random # import cv2 # from sklearn.model_selection import train_test_split # import os # import numpy as np # # Load the dataset # image_folder_path = "/kaggle/input/fetal-brain/images" # img_size = 224 # # Iterate over the subdirectories # img_list = [] # for subdirectory in os.listdir(image_folder_path): # if os.path.isfile(os.path.join(image_folder_path, subdirectory)): # continue # # Iterate over the images in the subdirectory # for image_file in os.listdir(os.path.join(image_folder_path, subdirectory))[:5]: # if image_file.endswith(".jpg") or image_file.endswith(".jpeg") or image_file.endswith(".png"): # img_list.append(os.path.join(subdirectory, image_file)) # # Shuffle the list of images # 
random.shuffle(img_list) # print(len(img_list)) # # Split the list of images into training and testing sets # train_image_list, test_image_list = train_test_split(img_list, test_size=0.2) # print(len(train_image_list)) # # Create the training and testing sets # train_img = [] # train_labels = [] # for image_file in train_image_list: # image = cv2.resize(cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size)) # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert image to grayscale # train_img.append(image) # train_labels.append(image_file.split('_')[0]) # test_img = [] # test_labels = [] # for image_file in test_image_list: # image = cv2.resize(cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size)) # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert image to grayscale # test_img.append(image) # test_labels.append(image_file.split('_')[0]) # # Convert the training and testing sets to arrays # train_img = np.array(train_img) # test_img = np.array(test_img) # # Print the shapes of the training and testing sets # print(train_img.shape) # print(test_img.shape) import random import cv2 from sklearn.model_selection import train_test_split import os import numpy as np from tensorflow.keras.applications.resnet50 import preprocess_input # Load the ResNet50 model from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.models import Model from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error # Load the dataset image_folder_path = "/kaggle/input/fetal-brain/images" img_size = 224 # Create a dictionary that maps each image file name to its label label_dict = {} for subdirectory in os.listdir(image_folder_path): if os.path.isfile(os.path.join(image_folder_path, subdirectory)): continue for image_file in os.listdir(os.path.join(image_folder_path, subdirectory))[:10]: if ( image_file.endswith(".jpg") or image_file.endswith(".jpeg") or image_file.endswith(".png") ): label_dict[os.path.join(subdirectory, image_file)] = int(subdirectory) # Shuffle the list of images img_list = list(label_dict.keys()) random.shuffle(img_list) print(len(img_list)) # Split the list of images into training and testing sets train_image_list, test_image_list = train_test_split(img_list, test_size=0.2) print(len(train_image_list)) # Create the training and testing sets train_img = [] train_labels = [] for image_file in train_image_list: train_img.append( cv2.resize( cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size), ) ) train_labels.append(label_dict[image_file]) test_img = [] test_labels = [] for image_file in test_image_list: test_img.append( cv2.resize( cv2.imread(os.path.join(image_folder_path, image_file)), (img_size, img_size), ) ) test_labels.append(label_dict[image_file]) # Convert the training and testing sets to arrays train_img = np.array(train_img) train_labels = np.array(train_labels) # Convert to numpy array test_img = np.array(test_img) test_labels = np.array(test_labels) # Convert to numpy array # Preprocess the images using ResNet50 preprocessing function train_img = preprocess_input(train_img) test_img = preprocess_input(test_img) base_model = ResNet50(weights="imagenet", include_top=False) # Add a global spatial average pooling layer x = base_model.output x = GlobalAveragePooling2D()(x) # Add a fully-connected layer with 512 hidden units and ReLU activation x = Dense(512, activation="relu")(x) # Add the 
output layer with one neuron for regression output_layer = Dense(1)(x) # Create the model with ResNet50 base and custom output layer model = Model(inputs=base_model.input, outputs=output_layer) # Compile the model with mean absolute error loss and mean model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mae"]) # Train the model model.fit(train_img, train_labels, epochs=50, batch_size=32) # Test the model and calculate R-squared and MAE scores y_pred = model.predict(test_img) r2 = r2_score(test_labels, y_pred) mae = mean_absolute_error(test_labels, y_pred) print("R-squared score: {:.3f}".format(r2)) print("MAE: {:.3f}".format(mae))
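The script above labels each image with its folder number, while the dataset description says the true gestational ages live in labels.xlsx. Below is a sketch of pulling those labels instead; the file location and the column names ("patient_id", "ga_weeks") are assumptions rather than anything taken from this notebook, so they should be checked against the actual spreadsheet first.

import os
import pandas as pd

labels_path = "/kaggle/input/fetal-brain/labels.xlsx"  # assumed location, per the dataset description
if os.path.exists(labels_path):
    labels_df = pd.read_excel(labels_path)
    print(labels_df.columns.tolist())  # inspect the real headers before relying on them
    # Hypothetical column names -- replace with the actual ones from labels.xlsx:
    # ga_by_patient = dict(zip(labels_df["patient_id"], labels_df["ga_weeks"]))
    # label_dict = {img: ga_by_patient[int(img.split(os.sep)[0])] for img in label_dict}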
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/387/129387425.ipynb
fetal-brain
asifhasan24
[{"Id": 129387425, "ScriptId": 38208391, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6835574, "CreationDate": "05/13/2023 11:04:56", "VersionNumber": 3.0, "Title": "fetal_brain_gestorial_age_pred", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 196.0, "LinesInsertedFromPrevious": 162.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 34.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185388245, "KernelVersionId": 129387425, "SourceDatasetVersionId": 5658861}]
[{"Id": 5658861, "DatasetId": 3252286, "DatasourceVersionId": 5734278, "CreatorUserId": 6835574, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "05/10/2023 21:43:21", "VersionNumber": 1.0, "Title": "fetal_brain", "Slug": "fetal-brain", "Subtitle": "Fetal Brain MRI from Stanford Lucile Packard Children's Hospital", "Description": "Magnetic resonance imaging offers unrivaled visualization of the fetal brain, which forms the basis for establishing age-specific morphologic milestones. However, gauging age-appropriate neural development remains a difficult task due to the constantly changing appearance of the fetal brain, variable image quality, and frequent motion artifacts. Here we present a large diverse single-center dataset of 741 developmentally normal fetal brain MRI with their corresponding gestational ages ranging from 19 to 39 weeks, as determined by estimated delivery date based on 1st trimester US. This dataset was used to build a deep learning model that accurately and reliably predicts gestational age using attention guidance and multi-plane learning approaches.\n\n- folders 1 to 741, subdirectories for each fetal brain MRI in .jpg format, consisting of a sequence for each of the 3 planes (axial, sagittal, coronal)\n- 'labels.xlsx', with columns for patient ID, gestational age in days and in weeks, and axial/sagittal/coronal series numbers and middle slice file names", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3252286, "CreatorUserId": 6835574, "OwnerUserId": 6835574.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5658861.0, "CurrentDatasourceVersionId": 5734278.0, "ForumId": 3317712, "Type": 2, "CreationDate": "05/10/2023 21:43:21", "LastActivityDate": "05/10/2023", "TotalViews": 221, "TotalDownloads": 46, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 6835574, "UserName": "asifhasan24", "DisplayName": "AsifHasan24", "RegisterDate": "03/01/2021", "PerformanceTier": 1}]
false
0
2,182
0
2,458
2,182
129387867
<jupyter_start><jupyter_text>Street Network of New York in GraphML ### Context Having such a task as predicting the travel time of taxis, it can be insightful to have a deeper look at the underlying street network of the city. Network Analysis can enable us to get insights for why certain taxi trips take longer than others given some basic network properties. Examples for the analysis can be: calculate the shortest path, measure the influence of specific streets on the robustness of the network or find out which streets are key points in the network when it comes to traffic flow. ### Content This dataset contains one large Graph for the Street Network of New York City in GraphML format and a subgraph for the area of Manhattan for fast testing of your Analysis. Each Graph was created with the awesome python package https://github.com/gboeing/osmnx which is not available on Kaggle. The Graphs nodes attributes are taken from OSM and contain information to which other nodes they are connected, how long the connection is, which speed limit it has etc. Kaggle dataset identifier: street-network-of-new-york-in-graphml <jupyter_script># # NY Taxi Fare Prediction # ## Final Competition - Group 34 # import numpy as np import pandas as pd import os input_dir = "/kaggle/input/" working_dir = "/kaggle/working/" # Reading Training Data df_train = pd.read_csv( os.path.join(input_dir, "new-york-city-taxi-fare-prediction/train.csv"), nrows=100_000, parse_dates=["pickup_datetime"], ) df_train.head() # ## Preprocessing def preprocess_remove_out_of_bound( df_input: pd.DataFrame, longitude_bounds: list = [-75, -72], latitude_bounds: list = [40, 42], ) -> pd.DataFrame: pickup_in_bound = ( (df_input.pickup_longitude > longitude_bounds[0]) & (df_input.pickup_longitude < longitude_bounds[1]) & (df_input.pickup_latitude > latitude_bounds[0]) & (df_input.pickup_latitude < latitude_bounds[1]) ) dropoff_in_bound = ( (df_input.dropoff_longitude > longitude_bounds[0]) & (df_input.dropoff_longitude < longitude_bounds[1]) & (df_input.dropoff_latitude > latitude_bounds[0]) & (df_input.dropoff_latitude < latitude_bounds[1]) ) return df_input[pickup_in_bound & dropoff_in_bound] def preprocess_all(df_input: pd.DataFrame) -> pd.DataFrame: print("Before Cleansing: {}".format(len(df_input))) df_input = df_input.dropna(how="any", axis="rows") print("After Removing NaN values: {}".format(len(df_input))) df_input = preprocess_remove_out_of_bound(df_input) print("After Removing Out-of-Bounds: {}".format(len(df_input))) return df_input # ## Feature Engineering def euclidean_dist(pickup_lat, pickup_long, dropoff_lat, dropoff_long): # linear distance between pickup and dropoff location latitude_to_km = 110.574 longitude_to_km = 111.320 distance = np.sqrt( ((dropoff_lat - pickup_lat) * latitude_to_km) ** 2 + ((dropoff_long - pickup_long) * longitude_to_km) ** 2 ) return distance def haversine_dist(pickup_lat, pickup_long, dropoff_lat, dropoff_long): # shortest distance between pickup and dropoff location on a sphere dLat = (dropoff_lat - pickup_lat) * np.pi / 180.0 dLon = (dropoff_long - pickup_long) * np.pi / 180.0 lat1 = (pickup_lat) * np.pi / 180.0 lat2 = (dropoff_lat) * np.pi / 180.0 a = np.power(np.sin(dLat / 2), 2) + np.power(np.sin(dLon / 2), 2) * np.cos( lat1 ) * np.cos(lat2) rad = 6371 distance = 2 * np.arcsin(np.sqrt(a)) return rad * distance df_train = preprocess_all(df_train) df_train["euclidean_distance"] = df_train.apply( lambda row: euclidean_dist( row["pickup_latitude"], row["pickup_longitude"], row["dropoff_latitude"], 
row["dropoff_longitude"], ), axis=1, ) df_train["haversine_distance"] = df_train.apply( lambda row: haversine_dist( row["pickup_latitude"], row["pickup_longitude"], row["dropoff_latitude"], row["dropoff_longitude"], ), axis=1, ) df_train.head() # TEST print(euclidean_dist(41, -74, 42, -73)) print(haversine_dist(41, -74, 42, -73))
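The two distance features above are built with row-wise .apply calls, which get slow as nrows grows; the same haversine formula can be evaluated on whole columns with NumPy. A sketch, assuming the df_train frame from above:

import numpy as np

def haversine_dist_vec(lat1, lon1, lat2, lon2):
    # same formula as haversine_dist, but operating on whole Series at once
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    return 2 * 6371 * np.arcsin(np.sqrt(a))

df_train["haversine_distance_vec"] = haversine_dist_vec(
    df_train["pickup_latitude"],
    df_train["pickup_longitude"],
    df_train["dropoff_latitude"],
    df_train["dropoff_longitude"],
)
# should agree with the apply-based column up to floating-point error
print((df_train["haversine_distance"] - df_train["haversine_distance_vec"]).abs().max())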
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/387/129387867.ipynb
street-network-of-new-york-in-graphml
crailtap
[{"Id": 129387867, "ScriptId": 38444236, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4446608, "CreationDate": "05/13/2023 11:09:53", "VersionNumber": 1.0, "Title": "Group34_Taxi_Fare_Prediction", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 82.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185388918, "KernelVersionId": 129387867, "SourceDatasetVersionId": 3129}]
[{"Id": 3129, "DatasetId": 1807, "DatasourceVersionId": 3129, "CreatorUserId": 557933, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "08/03/2017 10:12:03", "VersionNumber": 1.0, "Title": "Street Network of New York in GraphML", "Slug": "street-network-of-new-york-in-graphml", "Subtitle": "Analyse the New York City Street Network!", "Description": "### Context\nHaving such a task as predicting the travel time of taxis, it can be insightful to have a deeper look at the underlying street network of the city. Network Analysis can enable us to get insights for why certain taxi trips take longer than others given some basic network properties. Examples for the analysis can be: calculate the shortest path, measure the influence of specific streets on the robustness of the network or find out which streets are key points in the network when it comes to traffic flow.\n\n### Content\nThis dataset contains one large Graph for the Street Network of New York City in GraphML format and a subgraph for the area of Manhattan for fast testing of your Analysis. \nEach Graph was created with the awesome python package https://github.com/gboeing/osmnx which is not available on Kaggle. The Graphs nodes attributes are taken from OSM and contain information to which other nodes they are connected, how long the connection is, which speed limit it has etc.\n\n### Acknowledgements\nhttps://github.com/gboeing/osmnx\n\n\n### Inspiration\nExplore the New York Street Network, gain a deeper understanding for network analysis and craft some useful Features for the Taxi Trip Prediction Competition!", "VersionNotes": "Initial release", "TotalCompressedBytes": 62178183.0, "TotalUncompressedBytes": 62178183.0}]
[{"Id": 1807, "CreatorUserId": 557933, "OwnerUserId": 557933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3129.0, "CurrentDatasourceVersionId": 3129.0, "ForumId": 5315, "Type": 2, "CreationDate": "08/03/2017 10:12:03", "LastActivityDate": "02/05/2018", "TotalViews": 27608, "TotalDownloads": 1295, "TotalVotes": 30, "TotalKernels": 4}]
[{"Id": 557933, "UserName": "crailtap", "DisplayName": "Chris Cross", "RegisterDate": "03/15/2016", "PerformanceTier": 3}]
false
0
1,070
0
1,324
1,070
129877806
<jupyter_start><jupyter_text>Diabetes prediction dataset The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes. Kaggle dataset identifier: diabetes-prediction-dataset <jupyter_script># ## Basic Libraries import pandas as pd import numpy as np # ## Reading **"csv"** df = pd.read_csv( "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" ) df.head() # ## Exploring Data df.shape df.isnull().sum() df.nunique() # #### **'gender'** and **'smoking_history'** are the two columns with non-numeric values # ## Mapping numbers to **'gender'** and **'smoking_history'** df["gender"].unique() df["smoking_history"].unique() df["gender_label"] = df["gender"].map({"Female": 0, "Male": 1, "Other": 2}) df["smoking_label"] = df["smoking_history"].map( dict( zip(pd.Series(df["smoking_history"].unique()), pd.Series([i for i in range(6)])) ) ) # ## Creating new dataframe **'df_new'** df_new = df.drop(["gender", "smoking_history"], axis=1) df_new df_new.describe() # ## Checking for **'imbalance'** in dataset df_new["diabetes"].value_counts() # ## Using **'SMOTE'** to counter the imbalance from imblearn.over_sampling import SMOTE smote = SMOTE(sampling_strategy="minority") X_sm, y_sm = smote.fit_resample(df_new.drop(["diabetes"], axis=1), df_new["diabetes"]) y_sm.value_counts() # ## **train_test_split** from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X_sm, y_sm, random_state=23, test_size=0.33, stratify=y_sm ) # ## **Random_Forest** from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=10) rf.fit(X_train, y_train) rf.score(X_test, y_test) y_pred = rf.predict(X_test) # ## Classification Report **(97 % )** from sklearn.metrics import classification_report, confusion_matrix print(" Classification Report : \n\n\n ", classification_report(y_test, y_pred)) # ## **Confusion Matrix** and **Heatmap** cm = confusion_matrix(y_test, y_pred) import seaborn as sn import matplotlib.pyplot as plt plt.figure(figsize=(2, 2)) sn.heatmap(cm, annot=True, fmt="d") plt.xlabel("Prediction") plt.ylabel("Truth") plt.title("Confusion Matrix Heatmap") plt.show()
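One caveat with the workflow above: SMOTE is applied to the full dataset before train_test_split, so synthetic points interpolated near what later becomes the test set can leak into training, which tends to inflate the reported accuracy. A leakage-free variant keeps the original test set intact and oversamples only during fit; this is a sketch reusing the df_new frame from above.

from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

X = df_new.drop(["diabetes"], axis=1)
y = df_new["diabetes"]
X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, random_state=23, test_size=0.33, stratify=y
)

# imblearn's Pipeline applies the sampler only when fitting, never when predicting
pipe = Pipeline(
    [
        ("smote", SMOTE(sampling_strategy="minority", random_state=23)),
        ("rf", RandomForestClassifier(n_estimators=100, random_state=10)),
    ]
)
pipe.fit(X_tr, y_tr)  # SMOTE sees only the training fold
print(classification_report(y_te, pipe.predict(X_te)))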
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/877/129877806.ipynb
diabetes-prediction-dataset
iammustafatz
[{"Id": 129877806, "ScriptId": 38600762, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13051013, "CreationDate": "05/17/2023 06:24:24", "VersionNumber": 2.0, "Title": "(97% Accuracy) Diabetes prediction - Random Forest", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 103.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 103.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 186282279, "KernelVersionId": 129877806, "SourceDatasetVersionId": 5344155}]
[{"Id": 5344155, "DatasetId": 3102947, "DatasourceVersionId": 5417553, "CreatorUserId": 11427441, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/08/2023 06:11:45", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "Slug": "diabetes-prediction-dataset", "Subtitle": "A Comprehensive Dataset for Predicting Diabetes with Medical & Demographic Data", "Description": "The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3102947, "CreatorUserId": 11427441, "OwnerUserId": 11427441.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5344155.0, "CurrentDatasourceVersionId": 5417553.0, "ForumId": 3166206, "Type": 2, "CreationDate": "04/08/2023 06:11:45", "LastActivityDate": "04/08/2023", "TotalViews": 127619, "TotalDownloads": 24886, "TotalVotes": 309, "TotalKernels": 120}]
[{"Id": 11427441, "UserName": "iammustafatz", "DisplayName": "Mohammed Mustafa", "RegisterDate": "08/29/2022", "PerformanceTier": 0}]
false
1
715
3
905
715
129877144
<jupyter_start><jupyter_text>Car driving risk analysis Kaggle dataset identifier: car-driving-risk-analysis <jupyter_script>import pandas as pd import matplotlib.pyplot as plt import seaborn as sb data = pd.read_csv( "/kaggle/input/car-driving-risk-analysis/car driving risk analysis.csv" ) data.head() data.describe() data.isnull().sum() plt.boxplot(data) plt.show() from sklearn.model_selection import train_test_split X = data["speed"] y = data["risk"] X = X.values.reshape(-1, 1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1) from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(X_train, y_train) lr.predict(X_test) lr.coef_ lr.intercept_ lr.score(X_test, y_test) from sklearn.neural_network import MLPRegressor mlp = MLPRegressor() mlp.fit(X_train, y_train) mlp.predict(X_test) mlp.score(X_test, y_test) mlp.predict([[200]])
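The MLPRegressor above is fitted on the raw, unscaled speed column, and neural networks are sensitive to feature scale and to the default iteration limit; wrapping the model in a pipeline with a scaler is usually a safer setup. A sketch reusing the X_train/X_test split from above (the hidden-layer size and max_iter values are only illustrative choices):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor

# Standardize speed before it reaches the network
scaled_mlp = make_pipeline(
    StandardScaler(),
    MLPRegressor(hidden_layer_sizes=(32,), max_iter=2000, random_state=1),
)
scaled_mlp.fit(X_train, y_train)
print(scaled_mlp.score(X_test, y_test))
print(scaled_mlp.predict([[200]]))  # same extrapolation check as above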
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/877/129877144.ipynb
car-driving-risk-analysis
studymart
[{"Id": 129877144, "ScriptId": 38625321, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13387423, "CreationDate": "05/17/2023 06:18:20", "VersionNumber": 1.0, "Title": "Car Driving Risk Analysis", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 47.0, "LinesInsertedFromPrevious": 47.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186281340, "KernelVersionId": 129877144, "SourceDatasetVersionId": 1287441}]
[{"Id": 1287441, "DatasetId": 743400, "DatasourceVersionId": 1319536, "CreatorUserId": 5367665, "LicenseName": "Unknown", "CreationDate": "06/28/2020 13:52:53", "VersionNumber": 1.0, "Title": "Car driving risk analysis", "Slug": "car-driving-risk-analysis", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 743400, "CreatorUserId": 5367665, "OwnerUserId": 5367665.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1287441.0, "CurrentDatasourceVersionId": 1319536.0, "ForumId": 758293, "Type": 2, "CreationDate": "06/28/2020 13:52:53", "LastActivityDate": "06/28/2020", "TotalViews": 8490, "TotalDownloads": 1564, "TotalVotes": 61, "TotalKernels": 8}]
[{"Id": 5367665, "UserName": "studymart", "DisplayName": "Study Mart", "RegisterDate": "06/25/2020", "PerformanceTier": 0}]
false
1
295
0
321
295
129877729
import pandas as pd import os import numpy as np datapath1 = "/kaggle/input/cmpg313final/A.I-COVID/covid-chestxray-dataset-master" dataset_path = "/kaggle/input/cmpg313final/A.I-COVID/dataset" categories = os.listdir(dataset_path) print(categories) dataset = pd.read_csv(os.path.join(datapath1, "metadata.csv")) findings = dataset["finding"] image_names = dataset["filename"] positives_index = np.concatenate( (np.where(findings == "COVID-19")[0], np.where(findings == "SARS")[0]) ) positive_image_names = image_names[positives_index] import cv2 for positive_image_name in positive_image_names: image = cv2.imread(os.path.join(datapath1, "images", positive_image_name)) try: cv2.imwrite( os.path.join(dataset_path, categories[1], positive_image_name), image ) except Exception as e: print(e) datapath2 = "/kaggle/input/cmpg313final/A.I-COVID/archive (2)" dataset = pd.read_csv( os.path.join( datapath2, "/kaggle/input/cmpg313final/A.I-COVID/archive (2)/Chest_xray_Corona_Metadata.csv", ) ) findings = dataset["Label"] image_names = dataset["X_ray_image_name"] negative_index = np.where(findings == "Normal")[0] negative_image_names = image_names[negative_index] for negative_image_name in negative_image_names: image = cv2.imread(os.path.join(datapath2, "images", negative_image_name)) try: cv2.imwrite( os.path.join(dataset_path, categories[0], negative_image_name), image ) except Exception as e: print(e) negative_image_names.shape # DATA PREPROCESSING FOR CMPG313final A.I-COVID import cv2, os data_path = "/kaggle/input/cmpg313final/A.I-COVID/dataset" categories = os.listdir(data_path) labels = [i for i in range(len(categories))] label_dict = dict(zip(categories, labels)) # empty dictionary print(label_dict) print(categories) print(labels) img_size = 100 data = [] target = [] for category in categories: folder_path = os.path.join(data_path, category) img_names = os.listdir(folder_path) for img_name in img_names: img_path = os.path.join(folder_path, img_name) img = cv2.imread(img_path) try: gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Coverting the image into gray scale resized = cv2.resize(gray, (img_size, img_size)) # resizing the gray scale into 100x100, since we need a fixed common size for all the images in the dataset data.append(resized) target.append(label_dict[category]) # appending the image and the label(categorized) into the list (dataset) except Exception as e: print("Exception:", e) # if any exception rasied, the exception will be printed here. 
And pass to the next image import numpy as np data = np.array(data) / 255.0 data = np.reshape(data, (data.shape[0], img_size, img_size, 1)) target = np.array(target) from keras.utils import np_utils new_target = np_utils.to_categorical(target) # TRAINING THE CNN # ![image.png](attachment:9f95e0c6-1526-4913-8e48-5f89677f7c50.png) import numpy as np data = np.load("/kaggle/input/cmpg313final/A.I-COVID/data.npy") target = np.load("/kaggle/input/cmpg313final/A.I-COVID/target.npy") from keras.models import Sequential, Model from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, Activation, MaxPooling2D from keras.utils import normalize from keras.layers import Concatenate from keras import Input from keras.callbacks import ModelCheckpoint input_shape = data.shape[1:] # 50,50,1 inp = Input(shape=input_shape) convs = [] parrallel_kernels = [3, 5, 7] for k in range(len(parrallel_kernels)): conv = Conv2D( 128, parrallel_kernels[k], padding="same", activation="relu", input_shape=input_shape, strides=1, )(inp) convs.append(conv) out = Concatenate()(convs) conv_model = Model(inputs=inp, outputs=out) model = Sequential() model.add(conv_model) model.add(Conv2D(64, (3, 3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, (3, 3))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(128, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(64, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(2, input_dim=128, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.summary() from sklearn.model_selection import train_test_split train_data, test_data, train_target, test_target = train_test_split( data, target, test_size=0.1 ) checkpoint = ModelCheckpoint( "model-{epoch:03d}.model", monitor="val_loss", verbose=0, save_best_only=True, mode="auto", ) history = model.fit( train_data, train_target, epochs=15, callbacks=[checkpoint], validation_split=0.1 ) from matplotlib import pyplot as plt plt.plot(history.history["accuracy"], "r", label="training accuracy") plt.plot(history.history["val_accuracy"], label="validation accuracy") plt.xlabel("# epochs") plt.ylabel("loss") plt.legend() plt.show() plt.plot(history.history["accuracy"], "r", label="training accuracy") plt.plot(history.history["val_accuracy"], label="validation accuracy") plt.xlabel("# epochs") plt.ylabel("loss") plt.legend() plt.show() print(model.evaluate(test_data, test_target)) from kaggle_datasets import KaggleDatasets import os os.chdir("/kaggle/input/cmpg313final/A.I-COVID/webapp") from IPython.display import HTML with open("/kaggle/input/cmpg313final/A.I-COVID/webapp/templates/index.html", "r") as f: html_string = f.read() HTML(html_string)
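The training cell above loads data.npy and target.npy from the input directory, but the step that wrote them is not shown in this notebook, and /kaggle/input is read-only inside a kernel. If the arrays are rebuilt by the preprocessing cell, they could be saved to the working directory instead; this is a sketch that assumes the one-hot new_target is what the 2-unit softmax with categorical_crossentropy expects, and the later np.load paths would then need to point at /kaggle/working.

import numpy as np

# Persist the preprocessed arrays somewhere writable so the training step can reload them
np.save("/kaggle/working/data.npy", data)
np.save("/kaggle/working/target.npy", new_target)  # assumed: the one-hot labels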
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/877/129877729.ipynb
null
null
[{"Id": 129877729, "ScriptId": 37901467, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13181128, "CreationDate": "05/17/2023 06:23:50", "VersionNumber": 1.0, "Title": "final cmpg313 A.I project", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 186.0, "LinesInsertedFromPrevious": 186.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,905
0
1,905
1,905
129994308
# # Animated Visualization from Sequence ID # Function `animate` takes a sequence ID and produces an animation of that sequence. Can be configured to show different sets of landmarks, colors, and figure settings. # # Setup # [mediapipe on github](https://github.com/google/mediapipe/tree/master/mediapipe/python/solutions) import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.ioff() # turn off interactive plotting from matplotlib.animation import FuncAnimation import mediapipe as mp from types import SimpleNamespace from IPython.display import HTML from tqdm import tqdm root_dir = "/kaggle/input/asl-fingerspelling/" train = pd.read_csv(root_dir + "train.csv") sm = pd.read_csv(root_dir + "supplemental_metadata.csv") cfg = SimpleNamespace() # front-end settings cfg.parts = { # choose which landmarks to display in animation "right_hand": True, "left_hand": True, "pose": True, "lips": False, "left_eye": False, "left_iris": False, "left_eyebrow": False, "right_eye": False, "right_eyebrow": False, "right_iris": False, "face_oval": True, "nose": False, "contours": False, "irises": False, "tesselation": False, } cfg.parts["face"] = any( [ cfg.parts[p] for p in cfg.parts.keys() if p not in ["right_hand", "left_hand", "pose"] ] ) cfg.colors = { # choose colors for each part "right_hand": "firebrick", "left_hand": "darkgoldenrod", "pose": "teal", "face": "teal", } cfg.linewidth = 2 # linewidth obvs cfg.figsize = (5, 5) # figure size obvs cfg.xy_lims = ( -0.5, 1.5, ) # limits for x and y, same limits for both to preserve aspect ratio cfg.fps = 8 # frames per second cfg.interval = int(1000 / cfg.fps) # delay between frames in milliseconds cfg.repeat = False # whether to loop animation (can also just change in viewing pane) # back-end settings cfg.connections = { "right_hand": mp.solutions.hands_connections.HAND_CONNECTIONS, "left_hand": mp.solutions.hands_connections.HAND_CONNECTIONS, "pose": mp.solutions.pose_connections.POSE_CONNECTIONS, "lips": mp.solutions.face_mesh_connections.FACEMESH_LIPS, "left_eye": mp.solutions.face_mesh_connections.FACEMESH_LEFT_EYE, "left_iris": mp.solutions.face_mesh_connections.FACEMESH_LEFT_IRIS, "left_eyebrow": mp.solutions.face_mesh_connections.FACEMESH_LEFT_EYEBROW, "right_eye": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_EYE, "right_eyebrow": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_EYEBROW, "right_iris": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_IRIS, "face_oval": mp.solutions.face_mesh_connections.FACEMESH_FACE_OVAL, "nose": mp.solutions.face_mesh_connections.FACEMESH_NOSE, "contours": mp.solutions.face_mesh_connections.FACEMESH_CONTOURS, "irises": mp.solutions.face_mesh_connections.FACEMESH_IRISES, "tesselation": mp.solutions.face_mesh_connections.FACEMESH_TESSELATION, } cfg.connections["face"] = frozenset().union( *[ cfg.connections[p] for p in cfg.parts.keys() if cfg.parts[p] and p not in ["right_hand", "left_hand", "pose", "face"] ] ) cfg.columns = ( None # not implemented for now, but possible to only load columns we're using ) # ## `def animate` # takes a sequence_id, shows animation def animate(sid): pq_path = root_dir + train.loc[train["sequence_id"] == sid, "path"].item() pq = pd.read_parquet(pq_path, columns=cfg.columns) # load parquet file pq = pq.loc[pq.index == sid] # extract the sequence we want frames = pq["frame"].values # number of frames lines = [[] for _ in frames] # initialize list of line segments in each frame phrase = train.loc[train["sequence_id"] == sid, "phrase"].item() # sample label for frame in 
frames: coords = {} # to store coordinates for part in ["pose", "face", "right_hand", "left_hand"]: if not cfg.parts[part]: # if we're not drawing the part, skip it continue x_cols = pq.columns.str.contains( "x_" + part ) # get names of x-coord columns x = pq.loc[pq["frame"] == frame, x_cols].values[0] # get x-coords y_cols = pq.columns.str.contains( "y_" + part ) # get names of y-coord columns y = 1 - pq.loc[pq["frame"] == frame, y_cols].values[0] # get y-coords # y = 1-y because frames appear to be upside-down for i, j in cfg.connections[part]: # add lines to lines try: lines[frame] += [(x[i], x[j]), (y[i], y[j]), cfg.colors[part]] except IndexError as e: pass # not sure why were getting index errors, just ignoring them for now # print(f'part: {part}') # print(f'(i, j): {(i, j)}') fig, ax = plt.subplots(figsize=cfg.figsize) progress = tqdm(total=len(frames), position=0, leave=True) def draw(frame): ax.clear() ax.plot(*lines[frame], linewidth=cfg.linewidth) ax.set_xlim(cfg.xy_lims) ax.set_ylim(cfg.xy_lims) ax.set_title(f"seq {sid}: {phrase}") ax.set_xlabel(f"{frame}/{len(lines)}") progress.update() anim = FuncAnimation( fig=fig, func=draw, frames=len(frames), interval=cfg.interval, repeat=cfg.repeat, ) display(HTML(anim.to_jshtml())) # # Animations sid = train["sequence_id"].values[54] # choose sequence ID to visualize anim = animate(sid) # can take a minute or two for longer sequences
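The jshtml player is convenient in-notebook; to export a shareable file, the same FuncAnimation can be written out as a GIF. A sketch, assuming animate is modified to return anim instead of only displaying it:

from matplotlib.animation import PillowWriter

anim = animate(sid)  # assumes `return anim` has been added at the end of animate()
anim.save(f"seq_{sid}.gif", writer=PillowWriter(fps=cfg.fps))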
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/994/129994308.ipynb
null
null
[{"Id": 129994308, "ScriptId": 38606929, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3424928, "CreationDate": "05/18/2023 02:20:39", "VersionNumber": 1.0, "Title": "Simple Animated Visualization", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
# # Animated Visualization from Sequence ID # Function `animate` takes a sequence ID and produces an animation of that sequence. Can be configured to show different sets of landmarks, colors, and figure settings. # # Setup # [mediapipe on github](https://github.com/google/mediapipe/tree/master/mediapipe/python/solutions) import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.ioff() # turn off interactive plotting from matplotlib.animation import FuncAnimation import mediapipe as mp from types import SimpleNamespace from IPython.display import HTML from tqdm import tqdm root_dir = "/kaggle/input/asl-fingerspelling/" train = pd.read_csv(root_dir + "train.csv") sm = pd.read_csv(root_dir + "supplemental_metadata.csv") cfg = SimpleNamespace() # front-end settings cfg.parts = { # choose which landmarks to display in animation "right_hand": True, "left_hand": True, "pose": True, "lips": False, "left_eye": False, "left_iris": False, "left_eyebrow": False, "right_eye": False, "right_eyebrow": False, "right_iris": False, "face_oval": True, "nose": False, "contours": False, "irises": False, "tesselation": False, } cfg.parts["face"] = any( [ cfg.parts[p] for p in cfg.parts.keys() if p not in ["right_hand", "left_hand", "pose"] ] ) cfg.colors = { # choose colors for each part "right_hand": "firebrick", "left_hand": "darkgoldenrod", "pose": "teal", "face": "teal", } cfg.linewidth = 2 # linewidth obvs cfg.figsize = (5, 5) # figure size obvs cfg.xy_lims = ( -0.5, 1.5, ) # limits for x and y, same limits for both to preserve aspect ratio cfg.fps = 8 # frames per second cfg.interval = int(1000 / cfg.fps) # delay between frames in milliseconds cfg.repeat = False # whether to loop animation (can also just change in viewing pane) # back-end settings cfg.connections = { "right_hand": mp.solutions.hands_connections.HAND_CONNECTIONS, "left_hand": mp.solutions.hands_connections.HAND_CONNECTIONS, "pose": mp.solutions.pose_connections.POSE_CONNECTIONS, "lips": mp.solutions.face_mesh_connections.FACEMESH_LIPS, "left_eye": mp.solutions.face_mesh_connections.FACEMESH_LEFT_EYE, "left_iris": mp.solutions.face_mesh_connections.FACEMESH_LEFT_IRIS, "left_eyebrow": mp.solutions.face_mesh_connections.FACEMESH_LEFT_EYEBROW, "right_eye": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_EYE, "right_eyebrow": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_EYEBROW, "right_iris": mp.solutions.face_mesh_connections.FACEMESH_RIGHT_IRIS, "face_oval": mp.solutions.face_mesh_connections.FACEMESH_FACE_OVAL, "nose": mp.solutions.face_mesh_connections.FACEMESH_NOSE, "contours": mp.solutions.face_mesh_connections.FACEMESH_CONTOURS, "irises": mp.solutions.face_mesh_connections.FACEMESH_IRISES, "tesselation": mp.solutions.face_mesh_connections.FACEMESH_TESSELATION, } cfg.connections["face"] = frozenset().union( *[ cfg.connections[p] for p in cfg.parts.keys() if cfg.parts[p] and p not in ["right_hand", "left_hand", "pose", "face"] ] ) cfg.columns = ( None # not implemented for now, but possible to only load columns we're using ) # ## `def animate` # takes a sequence_id, shows animation def animate(sid): pq_path = root_dir + train.loc[train["sequence_id"] == sid, "path"].item() pq = pd.read_parquet(pq_path, columns=cfg.columns) # load parquet file pq = pq.loc[pq.index == sid] # extract the sequence we want frames = pq["frame"].values # number of frames lines = [[] for _ in frames] # initialize list of line segments in each frame phrase = train.loc[train["sequence_id"] == sid, "phrase"].item() # sample label for frame in 
frames: coords = {} # to store coordinates for part in ["pose", "face", "right_hand", "left_hand"]: if not cfg.parts[part]: # if we're not drawing the part, skip it continue x_cols = pq.columns.str.contains( "x_" + part ) # get names of x-coord columns x = pq.loc[pq["frame"] == frame, x_cols].values[0] # get x-coords y_cols = pq.columns.str.contains( "y_" + part ) # get names of y-coord columns y = 1 - pq.loc[pq["frame"] == frame, y_cols].values[0] # get y-coords # y = 1-y because frames appear to be upside-down for i, j in cfg.connections[part]: # add the line segments for this frame try: lines[frame] += [(x[i], x[j]), (y[i], y[j]), cfg.colors[part]] except IndexError: pass # not sure why we're getting index errors; just ignoring them for now # print(f'part: {part}') # print(f'(i, j): {(i, j)}') fig, ax = plt.subplots(figsize=cfg.figsize) progress = tqdm(total=len(frames), position=0, leave=True) def draw(frame): ax.clear() ax.plot(*lines[frame], linewidth=cfg.linewidth) ax.set_xlim(cfg.xy_lims) ax.set_ylim(cfg.xy_lims) ax.set_title(f"seq {sid}: {phrase}") ax.set_xlabel(f"{frame}/{len(lines)}") progress.update() anim = FuncAnimation( fig=fig, func=draw, frames=len(frames), interval=cfg.interval, repeat=cfg.repeat, ) display(HTML(anim.to_jshtml())) # # Animations sid = train["sequence_id"].values[54] # choose sequence ID to visualize anim = animate(sid) # can take a minute or two for longer sequences
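# A minimal sketch, not part of the original notebook: if `animate` were changed to `return anim`
# instead of displaying the JS/HTML player, the same FuncAnimation could also be written to a GIF,
# which is often quicker to share than the embedded player. The helper name and file path below
# are hypothetical; PillowWriter ships with matplotlib and only needs Pillow installed.
from matplotlib.animation import FuncAnimation, PillowWriter

def save_animation_as_gif(anim: FuncAnimation, path: str = "sequence.gif", fps: int = 8) -> None:
    """Write a FuncAnimation to disk as an animated GIF."""
    anim.save(path, writer=PillowWriter(fps=fps))

# Hypothetical usage, assuming animate() is modified to return the FuncAnimation object:
# anim = animate(sid)
# save_animation_as_gif(anim, "seq.gif", fps=cfg.fps)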
false
0
1,677
2
1,677
1,677
129994606
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # **Human Weight Prediction** # Data wrangling, transfor and analysis. Spark ML model, creating, saving and loading. # Objectives: # * Data web scraping, cleaning and transform data into a dataframe # * Create a simple Linear Regression Model, save and load the SparkML model # * Make predictions using the loaded SparkML model # Acknowledgement: SOCR Data Dinov 020108 HeightsWeights # Link: http://socr.ucla.edu/docs/resources/SOCR_Data/SOCR_Data_Dinov_020108_HeightsWeights.html # Import pandas as pd import pandas as pd # Define path to HTML file html_path = "http://socr.ucla.edu/docs/resources/SOCR_Data/SOCR_Data_Dinov_020108_HeightsWeights.html" # Read tables from HTML file and pass to pandas dataframe table = pd.read_html(html_path) df = table[0] df.head() df.drop(index=df.index[0], inplace=True) # Drop first row del df[0] # Delete Index column df = df.astype(float) # Change value types (string into float) df.head() # Metric conversions # Convert inches to cm df[1] = 2.54 * df[1] # Convert pounds to kg df[2] = df[2] * 0.453592 df = df.round(2) # Round to two decimals df.head() # Install spark import findspark findspark.init() # Start Spark session # Import pyspark, Spark API for Python from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession # Creating a spark context class sc = SparkContext() # Creating a spark session spark = ( SparkSession.builder.appName("Python Spark DataFrames basic example") .config("spark.some.config.option", "some-value") .getOrCreate() ) # Start sesion spark # Create PySpark DataFrame from Pandas columns = ["height", "weight"] dataframe = spark.createDataFrame(df, columns) dataframe.printSchema() dataframe.show(5) # Import Spark ML libraries import findspark findspark.init() from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession from pyspark.ml.feature import VectorAssembler from pyspark.ml.regression import LinearRegression # Converting data frame columns into feature vectors assembler = VectorAssembler(inputCols=["height"], outputCol="features") data = assembler.transform(dataframe).select("features", "weight") data.show(5) # Create and Train model lr = LinearRegression(featuresCol="features", labelCol="weight", maxIter=100) lr.setRegParam(0.1) # Fit the model lrModel = lr.fit(data) # Save the model Human Weight and Height lrModel.save("human_height.model") # Load the model from pyspark.ml.regression import ( LinearRegressionModel, ) # LinearRegressionModel to load the model model = LinearRegressionModel.load("human_height.model") # Make Prediction # This function converts a scalar number into a dataframe that can be used by the model to predict. 
def predict(height): assembler = VectorAssembler(inputCols=["height"], outputCol="features") data = [[height, 0]] columns = ["height", "weight"] input_df = spark.createDataFrame(data, columns) features_df = assembler.transform(input_df).select("features", "weight") predictions = model.transform(features_df) predictions.select("prediction").show() # Predict the weight of a person whose height is 170 cm predict(170) # Predict the weight of a person whose height is 150 cm predict(150) # Predict the weight of a person whose height is 200 cm predict(200)
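# A minimal sketch, not from the original notebook: the same prediction written as a small helper.
# The model above was trained with "height" (cm) as the single feature and "weight" (kg) as the label,
# so the helper takes a height and returns the predicted weight. `spark` and `model` are assumed to be
# the session and the loaded LinearRegressionModel from the cells above; the helper name is hypothetical.
from pyspark.ml.feature import VectorAssembler

def predict_weight_from_height(height_cm: float) -> float:
    """Build a one-row DataFrame with the height feature and run the loaded model on it."""
    assembler = VectorAssembler(inputCols=["height"], outputCol="features")
    row = spark.createDataFrame([(float(height_cm),)], ["height"])
    features = assembler.transform(row).select("features")
    return model.transform(features).select("prediction").first()["prediction"]

# Hypothetical usage:
# print(predict_weight_from_height(170.0))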
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/994/129994606.ipynb
null
null
[{"Id": 129994606, "ScriptId": 38627025, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13587595, "CreationDate": "05/18/2023 02:24:11", "VersionNumber": 1.0, "Title": "notebookf4dfe00a47", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 145.0, "LinesInsertedFromPrevious": 145.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,126
0
1,126
1,126
129800238
from bs4 import BeautifulSoup import pandas as pd import requests url = "https://ca.finance.yahoo.com/mutualfunds/" response = requests.get(url) soup = BeautifulSoup(response.content, "html.parser") table = soup.find("table", class_="W(100%)") table.find_all("th") header = [] # th-->table header for th in table.find_all("th")[:-1]: header.append(th.text.strip()) data = [] # tr-->table record for tr in table.find_all("tr")[1:]: row = [] for td in tr.find_all("td")[:-1]: row.append(td.text.strip()) data.append(row) df = pd.DataFrame(data=data, columns=header) df.to_csv("mutual_funds_data.csv", index=False) df import pandas as pd df = pd.read_csv("mutual_funds_data.csv") summary_stats = df.describe() print(summary_stats) import matplotlib.pyplot as plt names = df["Name"] prices = df["Price (Intraday)"] df_sorted = df.sort_values(by="Price (Intraday)", ascending=False) plt.bar(df_sorted["Name"], df_sorted["Price (Intraday)"]) plt.xlabel("Company Name") plt.ylabel("Price (Intraday)") plt.title("Top 10 Companies by Price (Intraday)") plt.xticks(rotation=90) plt.show() # Create the line chart # plt.plot(names, prices, marker='o') # scatter plot plt.scatter(df["200 Day Average"], df["Price (Intraday)"]) plt.xlabel("200 Day Average") plt.ylabel("Price (Intraday)") plt.title("Relationship between 200 Day Average and Price (Intraday)") plt.show()
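# A minimal sketch, not in the original notebook: the cells scraped from Yahoo Finance are strings,
# so numeric columns should have thousands separators stripped and be cast to float before sorting
# or plotting; otherwise the sort above is lexicographic. The helper name is hypothetical and the
# usage assumes the `df` built above.
import pandas as pd

def to_numeric_clean(series: pd.Series) -> pd.Series:
    """Strip thousands separators and coerce to float; unparseable cells become NaN."""
    return pd.to_numeric(series.astype(str).str.replace(",", "", regex=False), errors="coerce")

# Hypothetical usage on the scraped frame:
# for col in ["Price (Intraday)", "200 Day Average"]:
#     df[col] = to_numeric_clean(df[col])
# top10 = df.nlargest(10, "Price (Intraday)")  # matches the "Top 10" bar-chart title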
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/800/129800238.ipynb
null
null
[{"Id": 129800238, "ScriptId": 38603249, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14487588, "CreationDate": "05/16/2023 14:49:02", "VersionNumber": 1.0, "Title": "scraping mutual funds data", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 64.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
null
null
null
null
false
0
471
5
471
471
129899827
import numpy as np import math import pandas as pd from pandas_profiling import ProfileReport from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, make_scorer from sklearn.model_selection import cross_val_score, train_test_split from scipy.stats import skew pd.set_option( "display.float_format", lambda x: "%.3f" % x ) # show floats with only 3 digits after the decimal point from matplotlib import pyplot as plt # allows plotting directly in the notebook import seaborn as sns # plotting library # load the data df_train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) df_train.shape df_test = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ) df_test.shape # quick look at the data df_train.describe() # columns with missing values: LotFrontage and MasVnrArea ... # Many variables have a max well above the 75th percentile -> there may be many outliers # Plot histograms df_train.hist(bins=200, figsize=(20, 15)) # Check whether any Id is duplicated, to make sure there is no duplicated information in the data print(set(df_train.Id)) # returns a set of the unique values in the Id column idsUnique = len(set(df_train.Id)) # count the unique values in the Id column idsTotal = df_train.shape[0] # number of rows in the data idsDupli = idsTotal - idsUnique # number of duplicated Ids # Drop the Id column to remove redundant information and avoid overfitting df_train.drop( "Id", axis=1, inplace=True ) # axis=1 drops a column, axis=0 a row; inplace=True modifies the original data, otherwise a new copy is returned and the original is untouched # Look for outliers plt.scatter(df_train.GrLivArea, df_train.SalePrice) # scatter plot of GrLivArea (x) against SalePrice (y) plt.title("Looking for outliers") plt.xlabel("GrLivArea") # set title and axis labels plt.ylabel("SalePrice") plt.show() # remove the outliers df_train = df_train[df_train.GrLivArea < 4000] # EDA / preprocessing # Find missing values null = df_train.isnull().sum() / df_train.shape[0] * 100 col_to_drop = null[null > 50].keys() train_df = df_train.drop(col_to_drop, axis=1) null_test = df_test.isnull().sum() / df_test.shape[0] * 100 col_to_drop_test = null_test[null_test > 50].keys() test_df = df_test.drop(col_to_drop_test, axis=1) # Fill missing values train_df["LotFrontage"] = train_df["LotFrontage"].fillna(train_df["LotFrontage"].mean()) train_df["MasVnrArea"] = train_df["MasVnrArea"].fillna(train_df["MasVnrArea"].mean()) train_df["GarageYrBlt"] = train_df["GarageYrBlt"].fillna( train_df["GarageYrBlt"].median() ) train_df["MasVnrType"] = train_df["MasVnrType"].fillna(train_df["MasVnrType"].mode()[0]) train_df["BsmtQual"] = train_df["BsmtQual"].fillna(train_df["BsmtQual"].mode()[0]) train_df["BsmtCond"] = train_df["BsmtCond"].fillna(train_df["BsmtCond"].mode()[0]) train_df["BsmtExposure"] = train_df["BsmtExposure"].fillna( train_df["BsmtExposure"].mode()[0] ) train_df["BsmtFinType1"] = train_df["BsmtFinType1"].fillna( train_df["BsmtFinType1"].mode()[0] ) train_df["BsmtFinType2"] = train_df["BsmtFinType2"].fillna( train_df["BsmtFinType2"].mode()[0] ) train_df["Electrical"] = train_df["Electrical"].fillna(train_df["Electrical"].mode()[0]) train_df["FireplaceQu"] = train_df["FireplaceQu"].fillna( train_df["FireplaceQu"].mode()[0] ) train_df["GarageType"] = train_df["GarageType"].fillna(train_df["GarageType"].mode()[0]) train_df["GarageFinish"] = train_df["GarageFinish"].fillna(
train_df["GarageFinish"].mode()[0] ) train_df["GarageQual"] = train_df["GarageQual"].fillna(train_df["GarageQual"].mode()[0]) train_df["GarageCond"] = train_df["GarageCond"].fillna(train_df["GarageCond"].mode()[0]) test_df.fillna(test_df.mode().iloc[0], inplace=True) train_df = pd.get_dummies(train_df, drop_first=True) test_df = pd.get_dummies(test_df, drop_first=True) corr = train_df.corr() high_corr_features = corr.index[abs(corr["SalePrice"]) > 0.50] print(f"highly correlated feature:\n", high_corr_features) print(f"No. of highly correlated features:", len(high_corr_features)) plt.figure(figsize=(12, 10)) sns.heatmap( train_df[high_corr_features].corr(), annot=True, linewidth=2, ) X = train_df[high_corr_features.drop("SalePrice")] y = train_df[["SalePrice"]] test_df = test_df[high_corr_features.drop("SalePrice")] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=10 ) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) test_df = scaler.transform(test_df) # Define error measure for official scoring : RMSE scorer = make_scorer(mean_squared_error, greater_is_better=False) def rmse_cv_train(model): rmse = np.sqrt(-cross_val_score(model, X_train, y_train, scoring=scorer, cv=10)) return rmse def rmse_cv_test(model): rmse = np.sqrt(-cross_val_score(model, X_test, y_test, scoring=scorer, cv=10)) return rmse # Linear Regression lr = LinearRegression() lr.fit(X_train, y_train) # Look at predictions on training and validation set print("RMSE on Training set :", rmse_cv_train(lr).mean()) print("RMSE on Test set :", rmse_cv_test(lr).mean()) y_train_pred = lr.predict(X_train) y_test_pred = lr.predict(X_test) # Plot residuals plt.scatter(y_train_pred, y_train_pred - y_train, label="Training data") plt.scatter(y_test_pred, y_test_pred - y_test, label="Validation data") plt.title("Linear regression") plt.xlabel("Predicted values") plt.ylabel("Residuals") plt.legend(loc="upper left") plt.hlines(y=0, xmin=10.5, xmax=13.5) plt.show() # Plot predictions plt.scatter(y_train_pred, y_train, label="Training data") plt.scatter(y_test_pred, y_test, label="Validation data") plt.title("Linear regression") plt.xlabel("Predicted values") plt.ylabel("Real values") plt.legend(loc="upper left") plt.plot([10.5, 13.5], [10.5, 13.5]) plt.show() pred = lr.predict(X_test) plt.scatter(y_test, pred) plt.xlabel("Y Test") plt.ylabel("Predicted Y") prediction = lr.predict(test_df) ids = df_test["Id"] Final_sub = pd.DataFrame({"Id": ids, "SalePrice": prediction.flatten()}) Final_sub.head(10) Final_sub Final_sub.to_csv("submission.csv", index=False)
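# A minimal sketch, not from the original notebook: scikit-learn also ships a built-in
# "neg_root_mean_squared_error" scorer, so the manual make_scorer + sign flip + sqrt above
# can be expressed as a single helper. The function name is hypothetical; the usage assumes
# the X_train/y_train arrays prepared above.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression

def rmse_cv(model, X, y, cv: int = 10) -> float:
    """Mean cross-validated RMSE (lower is better)."""
    scores = cross_val_score(model, X, y, scoring="neg_root_mean_squared_error", cv=cv)
    return float(-scores.mean())

# Hypothetical usage:
# print("CV RMSE on the training split:", rmse_cv(LinearRegression(), X_train, y_train))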
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/899/129899827.ipynb
null
null
[{"Id": 129899827, "ScriptId": 37977831, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13926471, "CreationDate": "05/17/2023 09:47:14", "VersionNumber": 3.0, "Title": "Serious", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 181.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 117.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,405
0
2,405
2,405
129557006
# | Name | Section | # |---|---| # | مصطفي محمد جمعه | 8 | # | احمد سالم احمد | 1 | # | محمد عبدالباسط محمد | 6 | # # Setting up dependencies import pandas as pd import matplotlib.pyplot as plt data = pd.read_csv("kidney_disease.csv") # # Data info data.head() data.shape data.dtypes data.info() data.describe() # # Drop duplicates data = data.drop_duplicates() # # Show missing values miss = pd.DataFrame({"missing": data.isnull().sum()}) miss data.describe() # # Fill missing values data["age"] = data["age"].fillna(data["age"].median()) # # Show data data.boxplot(column="age") plt.title("data") plt.show() # # ASSOCIATION RULE MINING (Apriori) data_set = [ ["Eggs"], ["Milk", "beans", "Bananas"], ["Chicken", "Beer", "Eggs", "Bananas"], ["beans", "Beer", "Chicken", "Rice"], ["Eggs"], ["Apple"], ["Rice", "Rice"], ["Rice", "Beer", "Eggs"], ["Eggs", "Apple", "Bananas", "Eggs"], ["Rice", "Milk", "Chicken"], ["Chicken", "beans", "Bananas"], ["beans"], ["Eggs", "Beer", "Apple"], ["Chicken", "Eggs", "Eggs", "Rice"], ["beans", "Eggs", "Chicken"], ] from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori, association_rules # **TransactionEncoder: converts an item list into transaction data** # - The fit method learns the unique labels in the dataset # - The transform method transforms the input dataset into a one-hot encoded NumPy boolean array # - The NumPy array is boolean for the sake of memory efficiency when working with large datasets # - The columns_ attribute gives the unique column names that correspond to the data array shown above te = TransactionEncoder() te_array = te.fit(data_set).transform(data_set) df2 = pd.DataFrame(te_array, columns=te.columns_) df2 # **Frequent itemsets** # - The Apriori algorithm is used to find all the frequent itemsets frequent_item_ap = apriori(df2, min_support=0.01, use_colnames=True) frequent_item_ap # - Absolute support: frequency, a.k.a. support count. # - Relative support (which we use): the fraction of transactions that contain the itemset. # - Confidence: measures how often items in Y appear in transactions that contain X. # - Lift (correlation): the occurrence of itemset A is independent of the occurrence of itemset B if P(A ∪ B) = P(A)P(B) # - Lift(A => B) > 1 means that A and B are positively correlated # - Lift(A => B) < 1 means that the occurrence of A is negatively correlated with the occurrence of B # - Lift(A => B) = 1 means that A and B are independent rules_ap = association_rules(frequent_item_ap, metric="confidence", min_threshold=0.8) rules_ap
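# A small worked example, not from the original notebook, of the support/confidence/lift measures
# described above, computed by hand for the rule {Chicken} -> {Eggs} on a toy list of 4 transactions.
transactions = [
    {"Chicken", "Eggs"},
    {"Chicken", "Rice"},
    {"Eggs"},
    {"Chicken", "Eggs", "Beer"},
]
n = len(transactions)
support_a = sum("Chicken" in t for t in transactions) / n             # P(A) = 3/4
support_b = sum("Eggs" in t for t in transactions) / n                # P(B) = 3/4
support_ab = sum({"Chicken", "Eggs"} <= t for t in transactions) / n  # P(A and B) = 2/4
confidence = support_ab / support_a                                   # 2/3 ≈ 0.667
lift = confidence / support_b                                         # ≈ 0.889 < 1: slightly negatively correlated
print(support_a, support_b, support_ab, confidence, lift)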
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/557/129557006.ipynb
null
null
[{"Id": 129557006, "ScriptId": 38523755, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12930456, "CreationDate": "05/14/2023 20:06:17", "VersionNumber": 1.0, "Title": "notebook88f85f4a49", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 103.0, "LinesInsertedFromPrevious": 103.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
852
0
852
852
129557092
<jupyter_start><jupyter_text>A dataset for document classification. Built from the dataset available at http://www.mechanoid.kiev.ua/ml-text-proc.html (Automated processing of natural-language texts using Python tools, Evgeny Borisov, Tuesday, 24 January 2017) Kaggle dataset identifier: doc-cls <jupyter_script>from sklearn.feature_extraction.text import CountVectorizer import numpy as np from scipy.sparse import csr_matrix import pymorphy2 import re from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session file = open("../input/doc-cls/corp0.txt", "r") filedata = file.readlines() print("Dataset size", len(filedata)) # dictionary = file.get_feature_names() # print("Corpus vocabulary size before preprocessing.", len(dictionary)) # ### 1. Preparing the dataset. # Tokenization scheme: # · limit the maximum document length (in number of word forms); # · convert to lower case; # · replace foreign words with FRGN; # · replace tokens containing digits with NUMB; # · remove punctuation; # · lemmatize the Russian words, which shrinks the vocabulary; # · split the data and the labels into separate files (x.txt, y.txt). # Lemmatization is reducing a word form to its lemma, or normal form. # Check. An example of corpus preprocessing, printing the results for a few elements of the dataset dataset = filedata[35:39] # [54:60] print("Dataset \n", dataset, "\n") print("Dataset size", len(dataset)) # lowercase all words a = [x.lower() for x in dataset] # replace foreign words with FRGN; replace tokens containing digits with NUMB b = [] n = len(a) for i in range(n): b.append(a[i].split(" ")) for j in range(0, len(b[i])): if re.search("[a-z]", b[i][j]): b[i][j] = "frgn" # "FRGN" if re.search("[0-9]", b[i][j]): b[i][j] = "numb" # "NUMB" b[i] = " ".join(b[i]) # print(b) out = [] for x in b: out.append(re.sub(r"[^\w\s]", "", x)) dataset = out print("--------------------------------------") print("dataset after preprocessing") print(dataset) # lemmatization # Replaces words with their lemmas def to_normal_form(morph, s): s2 = s.split() # list of the words of sentence s s = "" for w in s2: w = morph.parse(w)[0].normal_form s += " " + w return s.lstrip() morph = pymorphy2.MorphAnalyzer() for i in range(len(dataset)): dataset[i] = to_normal_form(morph, dataset[i]) print(dataset) dataset = out # extract the labels print("--------------------------------------") y_data = [] n = len(dataset) for i in range(n): y_words = dataset[i].split(" ") y_data.append(y_words[0]) # print("Sentence split into words y_words", y_words,"\n") print("Labels y_label, \n", y_data, "\n") # extract the sentences without labels print("--------------------------------------") x_data = [] n = len(dataset) for i in range(n): ind_begin = dataset[i].index(" ") ind_begin += 1 x_data.append(dataset[i][ind_begin:]) print("Sentences without labels: \n", x_data, "\n") f_x = open("f_x.txt", "w") f_y = open("f_y.txt", "w") np.savetxt(f_x, x_data, fmt="%s") np.savetxt(f_y, y_data, fmt="%s") f_x.close() f_y.close() # x_data = np.array(x_data) # y_data = np.array(y_data) # for line in x_data: # f_x.write(line+"\n") # for line in y_data: # f_y.write(line+"\n")
# # Main program # ### Check. An example of corpus preprocessing, printing the results for a few elements of the dataset # # Check. Corpus preprocessing, here applied to the full corpus # dataset = filedata # #print("Dataset \n",dataset, "\n") # print("Dataset size", len(dataset)) # # lowercase all words # a = [x.lower() for x in dataset] # # replace foreign words with FRGN; replace tokens containing digits with NUMB # b = [] # n = len(a) # for i in range (n): # b.append(a[i].split(" ")) # for j in range (0, len(b[i])): # if (re.search('[a-z]', b[i][j])): # b[i][j]= "frgn"#"FRGN" # if (re.search('[0-9]', b[i][j])): # b[i][j]= "numb"#"NUMB" # b[i]=" ".join(b[i]) # #print(b) # out = [] # for x in b: # out.append (re.sub(r'[^\w\s]','', x)) # dataset = out # # print("--------------------------------------") # # print("dataset after preprocessing") # # print(dataset) # print("Done") # ### lemmatization # # lemmatization # # Replaces words with their lemmas # def to_normal_form(morph, s): # s2 = s.split() # list of the words of sentence s # s = '' # for w in s2: # w = morph.parse(w)[0].normal_form # s += (' ' + w) # return s.lstrip() # morph = pymorphy2.MorphAnalyzer() # for i in range (len(dataset)): # dataset[i]=to_normal_form(morph, dataset[i]) # #print (dataset) # print("Done") # dataset = out # # extract the labels # y_data = [] # n = len (dataset) # for i in range (n): # y_words = dataset[i].split(" ") # y_data.append(y_words[0]) # #print("Sentence split into words y_words", y_words,"\n") # #print("Labels y_label, \n", y_data,"\n") # # extract the sentences without labels # x_data = [] # n = len (dataset) # for i in range (n): # ind_begin = dataset[i].index(" ") # #ind_end = dataset[i].index("\n") # ind_begin +=1 # #x_data.append(dataset[i][ind_begin:ind_end]) # x_data.append(dataset[i][ind_begin:]) # #print("Sentences without labels: \n", x_data,"\n") # f_x = open("f_x.txt", "w") # f_y = open("f_y.txt", "w") # np.savetxt(f_x, x_data, fmt='%s') # np.savetxt(f_y, y_data, fmt='%s') # f_x.close() # f_y.close() # print("Data written to files")
# ### 2. Split the dataset into training and validation sets. x_file = open("../input/new-data/f_x_dat.txt", "r") x_data = x_file.readlines() print("Dataset size", len(x_data)) y_file = open("../input/new-data/f_y_dat.txt", "r") y_data = y_file.readlines() print("Number of labels", len(y_data)) # <load the data and the labels into lists (arrays) x, y> k_split = 0.2 x = x_data y = y_data from sklearn.preprocessing import LabelEncoder import keras lencoder = LabelEncoder() lencoder.fit(y_data) x_trn, x_vl, y_trn, y_vl = train_test_split(x, y, test_size=k_split, shuffle=True) print("number of samples in x_trn", len(x_trn)) print("number of samples in x_vl", len(x_vl)) # print(x_trn,"\n", x_vl,"\n", y_trn,"\n", y_vl) # ### 3. Vectorization. from sklearn.feature_extraction.text import CountVectorizer vec = CountVectorizer( token_pattern="\w+", binary=False ) # with binary=True the vectors representing the text lines would be binary len_trn = len(x_trn) x_trn.extend(x_vl) # concatenate x_trn and x_vl to get the full corpus x_trn = vec.fit_transform(x_trn) x_vl = x_trn[len_trn:] x_trn = x_trn[:len_trn] # convert the sparse matrices to dense arrays x_trn = np.float32(x_trn.toarray()) x_vl = np.float32(x_vl.toarray()) print(x_trn.shape) print(x_vl.shape) print(len(vec.get_feature_names())) import time # ### SGDClassifier.
from sklearn.linear_model import SGDClassifier doc_clf = SGDClassifier(loss="hinge", max_iter=1000, tol=1e-3) start_time = time.time() # print(y_trn) doc_clf.fit(x_trn, y_trn) # train the classifier end_time = time.time() - start_time print("Execution time: {:.8f} seconds".format(end_time)) print("Classification accuracy evaluation") score = doc_clf.score(x_vl, y_vl) print("Accuracy on the validation set:", round(score, 4)) score = doc_clf.score(x_trn, y_trn) print("Accuracy on the training set:", round(score, 4)) predictions = doc_clf.predict(x_vl) print( classification_report( lencoder.transform(y_vl), lencoder.transform(predictions), digits=4, target_names=lencoder.classes_, ) ) # ### LogisticRegression from sklearn.linear_model import LogisticRegression doc_clf = LogisticRegression( solver="lbfgs", max_iter=500, multi_class="auto" # newton-cg ) start_time = time.time() doc_clf.fit(x_trn, y_trn) # train the classifier end_time = time.time() - start_time print("Execution time: {:.8f} seconds".format(end_time)) print("Classification accuracy evaluation") score = doc_clf.score(x_vl, y_vl) print("Accuracy on the validation set:", round(score, 4)) score = doc_clf.score(x_trn, y_trn) print("Accuracy on the training set:", round(score, 4)) predictions = doc_clf.predict(x_vl) print( classification_report( lencoder.transform(y_vl), lencoder.transform(predictions), digits=4, target_names=lencoder.classes_, ) ) # --------------------- from sklearn.svm import SVC doc_clf_svc = SVC() # doc_clf = SGDClassifier(loss = 'hinge', max_iter = 1000, tol = 1e-3) start_time = time.time() # print(y_trn) doc_clf_svc.fit(x_trn, y_trn) # train the classifier end_time = time.time() - start_time print("Execution time: {:.8f} seconds".format(end_time)) print("Classification accuracy evaluation") score = doc_clf_svc.score(x_vl, y_vl) print("Accuracy on the validation set:", round(score, 4)) score = doc_clf_svc.score(x_trn, y_trn) print("Accuracy on the training set:", round(score, 4)) predictions = doc_clf_svc.predict(x_vl) print( classification_report( lencoder.transform(y_vl), lencoder.transform(predictions), digits=4, target_names=lencoder.classes_, ) )
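# A minimal sketch, not in the original notebook: wrapping the vectorizer and the classifier in a
# Pipeline fits the vocabulary on the training split only, so the validation texts are transformed
# with exactly the features seen during training instead of vectorizing the concatenated corpus.
# x_trn, x_vl, y_trn, y_vl are assumed to be the raw text/label splits from step 2 (lists of strings,
# before the manual vectorization above); the pipeline name is hypothetical.
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier

text_clf = Pipeline(
    [
        ("vec", CountVectorizer(token_pattern=r"\w+", binary=False)),
        ("clf", SGDClassifier(loss="hinge", max_iter=1000, tol=1e-3)),
    ]
)

# Hypothetical usage:
# text_clf.fit(x_trn, y_trn)
# print("validation accuracy:", text_clf.score(x_vl, y_vl))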
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/557/129557092.ipynb
doc-cls
olegbartenyev
[{"Id": 129557092, "ScriptId": 22853614, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8580041, "CreationDate": "05/14/2023 20:07:39", "VersionNumber": 1.0, "Title": "Abrosimova_LR-14", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 350.0, "LinesInsertedFromPrevious": 350.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185737996, "KernelVersionId": 129557092, "SourceDatasetVersionId": 1899667}]
[{"Id": 1899667, "DatasetId": 1132090, "DatasourceVersionId": 1937988, "CreatorUserId": 2471026, "LicenseName": "Unknown", "CreationDate": "02/01/2021 13:15:00", "VersionNumber": 2.0, "Title": "\u041d\u0430\u0431\u043e\u0440 \u0434\u0430\u043d\u043d\u044b\u0445 \u0434\u043b\u044f \u043a\u043b\u0430\u0441\u0441\u0438\u0444\u0438\u043a\u0430\u0446\u0438\u0438 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u043e\u0432", "Slug": "doc-cls", "Subtitle": NaN, "Description": "\u0421\u043e\u0437\u0434\u0430\u043d \u043d\u0430 \u043e\u0441\u043d\u043e\u0432\u0435 \u043d\u0430\u0431\u043e\u0440\u0430 \u0434\u0430\u043d\u043d\u044b\u0445, \u0434\u043e\u0441\u0442\u0443\u043f\u043d\u043e\u0433\u043e \u043d\u0430 http://www.mechanoid.kiev.ua/ml-text-proc.html (\u0410\u0432\u0442\u043e\u043c\u0430\u0442\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u0430\u044f \u043e\u0431\u0440\u0430\u0431\u043e\u0442\u043a\u0430 \u0442\u0435\u043a\u0441\u0442\u043e\u0432 \u043d\u0430 \u0435\u0441\u0442\u0435\u0441\u0442\u0432\u0435\u043d\u043d\u043e\u043c \u044f\u0437\u044b\u043a\u0435, \u0441 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u043d\u0438\u0435\u043c \u0438\u043d\u0441\u0442\u0440\u0443\u043c\u0435\u043d\u0442\u043e\u0432 \u044f\u0437\u044b\u043a\u0430 Python, \u0415\u0432\u0433\u0435\u043d\u0438\u0439 \u0411\u043e\u0440\u0438\u0441\u043e\u0432, \u0432\u0442\u043e\u0440\u043d\u0438\u043a, 24 \u044f\u043d\u0432\u0430\u0440\u044f 2017 \u0433.)", "VersionNotes": "\u041d\u0430\u0431\u043e\u0440 \u0434\u0430\u043d\u043d\u044b\u0445 \u0434\u043b\u044f \u043a\u043b\u0430\u0441\u0441\u0438\u0444\u0438\u043a\u0430\u0446\u0438\u0438 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0439", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1132090, "CreatorUserId": 2471026, "OwnerUserId": 2471026.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1899667.0, "CurrentDatasourceVersionId": 1937988.0, "ForumId": 1149525, "Type": 2, "CreationDate": "02/01/2021 12:55:50", "LastActivityDate": "02/01/2021", "TotalViews": 1428, "TotalDownloads": 38, "TotalVotes": 1, "TotalKernels": 1}]
[{"Id": 2471026, "UserName": "olegbartenyev", "DisplayName": "Oleg Bartenyev", "RegisterDate": "11/09/2018", "PerformanceTier": 0}]
false
0
3,845
0
3,984
3,845
129557749
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/working"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session Tags = pd.read_csv("/kaggle/working/Filtered_tags.csv") Questions = pd.read_csv("/kaggle/working/Combined.csv") tag_map = { "html5": "html", "iphone": "ios", "ruby-on-rails": "ruby", "ruby-on-rails-3": "ruby", "asp.net-mvc": "asp.net", "xcode": "ios", "vba": "excel", "python-2.7": "python", "css3": "css", "excel-vba": "excel", "sql-server-2008": "sql-server", "visual-studio-2010": "visual-studio", "google-maps": "google", "google-chrome": "google", "jsp": "java", "python-3.x": "python", } def reset_tags(tag): try: return tag_map[tag] except: return tag Tags = pd.read_csv("/kaggle/working/Filtered_tags.csv") Tags["Tag"] = Tags["Tag"].apply(lambda x: reset_tags(x)) Tags["Tag"].value_counts() Tags.shape Tags = Tags.drop_duplicates() Tags = Tags[["Id", "Tag"]] Tags.shape Tags = Tags.groupby("Id", as_index=False).agg(lambda x: ",".join(x)) Tags.head() Tags["Tag"] = Tags["Tag"].apply(lambda x: x.split(",")) data = pd.merge(Questions, Tags, on="Id") data.head() data.shape data = data[["Body", "Tag"]] data.head() Body = data["Body"] Tag = data["Tag"] from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import train_test_split from sklearn.svm import LinearSVC from sklearn.multiclass import OneVsRestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.neural_network import MLPClassifier from sklearn import tree binarizer = MultiLabelBinarizer() Tag = binarizer.fit_transform(Tag) Tag binarizer.classes_ Tag = pd.DataFrame(Tag, columns=binarizer.classes_) Tag.head() tfidf = TfidfVectorizer(max_features=20000, stop_words="english") Body_features = tfidf.fit_transform(Body) Body_features.shape X_train, X_test, Y_train, Y_test = train_test_split(Body_features, Tag, random_state=0) model = LinearSVC() clf = OneVsRestClassifier(model) clf.fit(X_train, Y_train) Y_pred = clf.predict(X_test) score = np.minimum(Y_test, Y_pred).sum(axis=1) / np.maximum(Y_test, Y_pred).sum(axis=1) print("Score: " + str(score.mean() * 100)) import skmultilearn from skmultilearn.problem_transform import BinaryRelevance from skmultilearn.problem_transform import ClassifierChain from skmultilearn.problem_transform import LabelPowerset from skmultilearn.adapt import MLkNN from sklearn.naive_bayes import MultinomialNB binary_rel_clf = BinaryRelevance(MultinomialNB()) binary_rel_clf.fit(X_train, Y_train)
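# A minimal sketch, not in the original notebook: the manual min/max ratio above is the per-sample
# Jaccard index for multilabel indicator matrices, which scikit-learn exposes directly. The helper
# name is hypothetical; Y_test and Y_pred are assumed to be the indicator matrices from the split above.
from sklearn.metrics import f1_score, jaccard_score

def multilabel_report(y_true, y_pred) -> None:
    """Print samples-averaged Jaccard and micro-averaged F1 for binary indicator matrices."""
    print("Jaccard (samples):", jaccard_score(y_true, y_pred, average="samples"))
    print("F1 (micro):", f1_score(y_true, y_pred, average="micro"))

# Hypothetical usage:
# multilabel_report(Y_test, Y_pred)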
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/557/129557749.ipynb
null
null
[{"Id": 129557749, "ScriptId": 38507153, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10365766, "CreationDate": "05/14/2023 20:17:01", "VersionNumber": 1.0, "Title": "notebookc5e36ec9ee", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 138.0, "LinesInsertedFromPrevious": 138.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/working"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session Tags = pd.read_csv("/kaggle/working/Filtered_tags.csv") Questions = pd.read_csv("/kaggle/working/Combined.csv") tag_map = { "html5": "html", "iphone": "ios", "ruby-on-rails": "ruby", "ruby-on-rails-3": "ruby", "asp.net-mvc": "asp.net", "xcode": "ios", "vba": "excel", "python-2.7": "python", "css3": "css", "excel-vba": "excel", "sql-server-2008": "sql-server", "visual-studio-2010": "visual-studio", "google-maps": "google", "google-chrome": "google", "jsp": "java", "python-3.x": "python", } def reset_tags(tag): try: return tag_map[tag] except: return tag Tags = pd.read_csv("/kaggle/working/Filtered_tags.csv") Tags["Tag"] = Tags["Tag"].apply(lambda x: reset_tags(x)) Tags["Tag"].value_counts() Tags.shape Tags = Tags.drop_duplicates() Tags = Tags[["Id", "Tag"]] Tags.shape Tags = Tags.groupby("Id", as_index=False).agg(lambda x: ",".join(x)) Tags.head() Tags["Tag"] = Tags["Tag"].apply(lambda x: x.split(",")) data = pd.merge(Questions, Tags, on="Id") data.head() data.shape data = data[["Body", "Tag"]] data.head() Body = data["Body"] Tag = data["Tag"] from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import train_test_split from sklearn.svm import LinearSVC from sklearn.multiclass import OneVsRestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.neural_network import MLPClassifier from sklearn import tree binarizer = MultiLabelBinarizer() Tag = binarizer.fit_transform(Tag) Tag binarizer.classes_ Tag = pd.DataFrame(Tag, columns=binarizer.classes_) Tag.head() tfidf = TfidfVectorizer(max_features=20000, stop_words="english") Body_features = tfidf.fit_transform(Body) Body_features.shape X_train, X_test, Y_train, Y_test = train_test_split(Body_features, Tag, random_state=0) model = LinearSVC() clf = OneVsRestClassifier(model) clf.fit(X_train, Y_train) Y_pred = clf.predict(X_test) score = np.minimum(Y_test, Y_pred).sum(axis=1) / np.maximum(Y_test, Y_pred).sum(axis=1) print("Score: " + str(score.mean() * 100)) import skmultilearn from skmultilearn.problem_transform import BinaryRelevance from skmultilearn.problem_transform import ClassifierChain from skmultilearn.problem_transform import LabelPowerset from skmultilearn.adapt import MLkNN from sklearn.naive_bayes import MultinomialNB binary_rel_clf = BinaryRelevance(MultinomialNB()) binary_rel_clf.fit(X_train, Y_train)
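A side note on the tag-normalisation step repeated in the script above: `dict.get` with a default gives the same behaviour as the `try/except` around `tag_map[tag]` without needing a bare `except`. The sketch below runs on a tiny made-up DataFrame rather than the notebook's CSV files.

```python
import pandas as pd

tag_map = {"html5": "html", "python-3.x": "python", "css3": "css"}

# Toy tag table (invented rows) mirroring the Id/Tag layout used in the notebook.
tags = pd.DataFrame({"Id": [1, 1, 2, 2, 3],
                     "Tag": ["html5", "css3", "python-3.x", "pandas", "java"]})

# dict.get(tag, tag) returns the mapped tag, or the tag itself when no mapping exists,
# which replaces the try/except around tag_map[tag].
tags["Tag"] = tags["Tag"].apply(lambda t: tag_map.get(t, t))

# Collapse duplicate rows and join each question's tags into a single list,
# as the notebook does before merging with the questions table.
tags = tags.drop_duplicates().groupby("Id", as_index=False).agg({"Tag": ",".join})
tags["Tag"] = tags["Tag"].str.split(",")
print(tags)
```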
false
0
1,030
0
1,030
1,030
129189475
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import csv import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling as pf train = pd.read_csv("../input/dac-find-it-2023/training_set_features.csv", sep=",") labels = pd.read_csv("../input/dac-find-it-2023/training_set_labels.csv", sep=",") test = pd.read_csv("../input/dac-find-it-2023/test_set_features.csv", sep=",") print(test.shape) print(labels.shape) num_cols = train.select_dtypes("number").columns cat_cols = [ "race", "sex", "marital_status", "rent_or_own", "hhs_geo_region", "census_msa", "employment_industry", "employment_occupation", ] ord_cols = ["age_group", "education", "income_poverty", "employment_status"] assert len(num_cols) + len(cat_cols) + len(ord_cols) == train.shape[1] from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from category_encoders import OrdinalEncoder as oe from catboost import CatBoostClassifier from catboost import Pool, cv from sklearn.metrics import roc_curve, roc_auc_score import optuna # ### TRAIN IMPUTE for col in cat_cols + ord_cols: train[col] = train[col].fillna(value="None") for col in num_cols: train[col] = train[col].fillna(value=-1) # ### TEST IMPUTE for col in cat_cols + ord_cols: test[col] = test[col].fillna(value="None") for col in num_cols: test[col] = test[col].fillna(value=-1) # ## SPLIT DATA from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( train, labels, test_size=0.3, random_state=68 ) categorical_features_indices = np.where(X_train.dtypes != float)[0] # ## OPTUNA & CROSS VALIDATION train_dataset = Pool( data=X_train, label=y_train.h1n1_vaccine, cat_features=categorical_features_indices ) def objective(trial): param = { "iterations": trial.suggest_categorical( "iterations", [100, 200, 300, 500, 1000, 1200, 1500] ), "learning_rate": trial.suggest_float("learning_rate", 0.001, 0.3), "random_strength": trial.suggest_int("random_strength", 1, 10), "bagging_temperature": trial.suggest_int("bagging_temperature", 0, 10), "max_bin": trial.suggest_categorical("max_bin", [4, 5, 6, 8, 10, 20, 30]), "grow_policy": trial.suggest_categorical( "grow_policy", ["SymmetricTree", "Depthwise", "Lossguide"] ), "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, 10), "od_type": "Iter", "od_wait": 100, "depth": trial.suggest_int("max_depth", 2, 10), "l2_leaf_reg": trial.suggest_loguniform("l2_leaf_reg", 1e-8, 100), "one_hot_max_size": trial.suggest_categorical( "one_hot_max_size", [5, 10, 12, 100, 500, 1024] ), "custom_metric": ["AUC"], "loss_function": "Logloss", "auto_class_weights": trial.suggest_categorical( "auto_class_weights", ["Balanced", "SqrtBalanced"] ), } scores = cv(
train_dataset, param, fold_count=5, early_stopping_rounds=10, plot=False, verbose=False, ) return scores["test-AUC-mean"].max() sampler = optuna.samplers.TPESampler( seed=68 ) # Make the sampler behave in a deterministic way. study = optuna.create_study(direction="maximize", sampler=sampler) study.optimize(objective, n_trials=100) print("Number of finished trials: {}".format(len(study.trials))) print("Best trial:") trial = study.best_trial print(" Value: {}".format(trial.value)) print(" Params: ") for key, value in trial.params.items(): print(" {}={},".format(key, value)) # ## MODEL CHECKING final_model = CatBoostClassifier( verbose=False, cat_features=categorical_features_indices, **trial.params ) final_model.fit(X_train, y_train.h1n1_vaccine) predictions_h1 = final_model.predict_proba(X_test) predictions_h1 = predictions_h1[:, 1].reshape(-1, 1) from sklearn.metrics import roc_curve, roc_auc_score def plot_roc(y_true, y_score, label_name, ax): fpr, tpr, thresholds = roc_curve(y_true, y_score) ax.plot(fpr, tpr) ax.plot([0, 1], [0, 1], color="grey", linestyle="--") ax.set_ylabel("TPR") ax.set_xlabel("FPR") ax.set_title(f"{label_name}: AUC = {roc_auc_score(y_true, y_score):.4f}") fig, ax = plt.subplots(1, 1, figsize=(10, 8)) plot_roc(y_test["h1n1_vaccine"], predictions_h1, "h1n1_vaccine", ax=ax) roc_auc_score(y_test.h1n1_vaccine, predictions_h1) # ## FULL DATASET RE-TRAIN # ### H1N1 final_model.fit(train, labels.h1n1_vaccine) final_h1 = final_model.predict_proba(test) final_h1 = final_h1[:, 1].reshape(-1, 1) # ### SEASONAL final_model_se = CatBoostClassifier( verbose=False, cat_features=categorical_features_indices, **trial.params ) final_model_se.fit(train, labels.seasonal_vaccine) final_se = final_model_se.predict_proba(test) final_se = final_se[:, 1].reshape(-1, 1) # # SUBMISSION PREDICTION submission_data = pd.read_csv( "../input/dac-find-it-2023/submission_format.csv", sep=",", index_col="respondent_id", ) np.testing.assert_array_equal(test.index.values, submission_data.index.values) submission_data["h1n1_vaccine"] = final_h1 submission_data["seasonal_vaccine"] = final_se submission_data.head() submission_data.to_csv(f"KsatriaPetir_for_DAC_FindIT.csv", index=True)
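The tuning loop above hands each Optuna trial's sampled parameters to `catboost.cv` and maximizes the cross-validated AUC. As a self-contained illustration of the same study/objective mechanics (runnable without the competition files), the sketch below tunes a plain scikit-learn model on synthetic data; the model and search space are illustrative stand-ins, not the notebook's.

```python
import optuna
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Synthetic binary-classification data so the example runs anywhere.
X, y = make_classification(n_samples=500, n_features=20, random_state=68)

def objective(trial):
    # Sample one hyperparameter configuration per trial.
    c = trial.suggest_float("C", 1e-3, 10.0, log=True)
    model = LogisticRegression(C=c, max_iter=1000)
    # Return the mean cross-validated AUC; Optuna maximizes this value.
    return cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean()

# A seeded TPESampler makes the search reproducible, as in the notebook.
study = optuna.create_study(direction="maximize",
                            sampler=optuna.samplers.TPESampler(seed=68))
study.optimize(objective, n_trials=20)
print("Best AUC:", study.best_value)
print("Best params:", study.best_params)
```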
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/189/129189475.ipynb
null
null
[{"Id": 129189475, "ScriptId": 37938767, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12501108, "CreationDate": "05/11/2023 16:55:09", "VersionNumber": 3.0, "Title": "KP_DAC_FindIT", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 146.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 48.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import csv import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling as pf train = pd.read_csv("../input/dac-find-it-2023/training_set_features.csv", sep=",") labels = pd.read_csv("../input/dac-find-it-2023/training_set_labels.csv", sep=",") test = pd.read_csv("../input/dac-find-it-2023/test_set_features.csv", sep=",") print(test.shape) print(labels.shape) num_cols = train.select_dtypes("number").columns cat_cols = [ "race", "sex", "marital_status", "rent_or_own", "hhs_geo_region", "census_msa", "employment_industry", "employment_occupation", ] ord_cols = ["age_group", "education", "income_poverty", "employment_status"] assert len(num_cols) + len(cat_cols) + len(ord_cols) == train.shape[1] from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from category_encoders import OrdinalEncoder as oe from catboost import CatBoostClassifier from catboost import Pool, cv from sklearn.metrics import roc_curve, roc_auc_score import optuna # ### TRAIN IMPUTE for col in cat_cols + ord_cols: train[col] = train[col].fillna(value="None") for col in num_cols: train[col] = train[col].fillna(value=-1) # ### TEST IMPUTE for col in cat_cols + ord_cols: test[col] = test[col].fillna(value="None") for col in num_cols: test[col] = test[col].fillna(value=-1) # ## SPLIT DATA from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( train, labels, test_size=0.3, random_state=68 ) categorical_features_indices = np.where(X_train.dtypes != float)[0] # ## OPTUNA & CROSS VALIDATION train_dataset = Pool( data=X_train, label=y_train.h1n1_vaccine, cat_features=categorical_features_indices ) def objective(trial): param = { "iterations": trial.suggest_categorical( "iterations", [100, 200, 300, 500, 1000, 1200, 1500] ), "learning_rate": trial.suggest_float("learning_rate", 0.001, 0.3), "random_strength": trial.suggest_int("random_strength", 1, 10), "bagging_temperature": trial.suggest_int("bagging_temperature", 0, 10), "max_bin": trial.suggest_categorical("max_bin", [4, 5, 6, 8, 10, 20, 30]), "grow_policy": trial.suggest_categorical( "grow_policy", ["SymmetricTree", "Depthwise", "Lossguide"] ), "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, 10), "od_type": "Iter", "od_wait": 100, "depth": trial.suggest_int("max_depth", 2, 10), "l2_leaf_reg": trial.suggest_loguniform("l2_leaf_reg", 1e-8, 100), "one_hot_max_size": trial.suggest_categorical( "one_hot_max_size", [5, 10, 12, 100, 500, 1024] ), "custom_metric": ["AUC"], "loss_function": "Logloss", "auto_class_weights": trial.suggest_categorical( "auto_class_weights", ["Balanced", "SqrtBalanced"] ), } scores = cv(
train_dataset, param, fold_count=5, early_stopping_rounds=10, plot=False, verbose=False, ) return scores["test-AUC-mean"].max() sampler = optuna.samplers.TPESampler( seed=68 ) # Make the sampler behave in a deterministic way. study = optuna.create_study(direction="maximize", sampler=sampler) study.optimize(objective, n_trials=100) print("Number of finished trials: {}".format(len(study.trials))) print("Best trial:") trial = study.best_trial print(" Value: {}".format(trial.value)) print(" Params: ") for key, value in trial.params.items(): print(" {}={},".format(key, value)) # ## MODEL CHECKING final_model = CatBoostClassifier( verbose=False, cat_features=categorical_features_indices, **trial.params ) final_model.fit(X_train, y_train.h1n1_vaccine) predictions_h1 = final_model.predict_proba(X_test) predictions_h1 = predictions_h1[:, 1].reshape(-1, 1) from sklearn.metrics import roc_curve, roc_auc_score def plot_roc(y_true, y_score, label_name, ax): fpr, tpr, thresholds = roc_curve(y_true, y_score) ax.plot(fpr, tpr) ax.plot([0, 1], [0, 1], color="grey", linestyle="--") ax.set_ylabel("TPR") ax.set_xlabel("FPR") ax.set_title(f"{label_name}: AUC = {roc_auc_score(y_true, y_score):.4f}") fig, ax = plt.subplots(1, 1, figsize=(10, 8)) plot_roc(y_test["h1n1_vaccine"], predictions_h1, "h1n1_vaccine", ax=ax) roc_auc_score(y_test.h1n1_vaccine, predictions_h1) # ## FULL DATASET RE-TRAIN # ### H1N1 final_model.fit(train, labels.h1n1_vaccine) final_h1 = final_model.predict_proba(test) final_h1 = final_h1[:, 1].reshape(-1, 1) # ### SEASONAL final_model_se = CatBoostClassifier( verbose=False, cat_features=categorical_features_indices, **trial.params ) final_model_se.fit(train, labels.seasonal_vaccine) final_se = final_model_se.predict_proba(test) final_se = final_se[:, 1].reshape(-1, 1) # # SUBMISSION PREDICTION submission_data = pd.read_csv( "../input/dac-find-it-2023/submission_format.csv", sep=",", index_col="respondent_id", ) np.testing.assert_array_equal(test.index.values, submission_data.index.values) submission_data["h1n1_vaccine"] = final_h1 submission_data["seasonal_vaccine"] = final_se submission_data.head() submission_data.to_csv(f"KsatriaPetir_for_DAC_FindIT.csv", index=True)
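Since the notebook needs one tuned classifier per target column (and the seasonal model has to be constructed before it is fitted), the full-dataset retrain step can also be written as a loop over the two labels. This is only a sketch under the notebook's own variables (`train`, `labels`, `test`, `trial.params`, `categorical_features_indices`, `submission_data`); it is not standalone.

```python
from catboost import CatBoostClassifier

predictions = {}
for target in ["h1n1_vaccine", "seasonal_vaccine"]:
    # One classifier per vaccine label, reusing the tuned parameters.
    model = CatBoostClassifier(
        verbose=False,
        cat_features=categorical_features_indices,
        **trial.params,
    )
    model.fit(train, labels[target])
    # Keep the probability of the positive class for the submission file.
    predictions[target] = model.predict_proba(test)[:, 1]

# submission_data["h1n1_vaccine"] = predictions["h1n1_vaccine"]
# submission_data["seasonal_vaccine"] = predictions["seasonal_vaccine"]
```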
false
0
2,046
0
2,046
2,046
129189536
# ## Data Visualization # is the graphical representation of data that helps to communicate complex information in an easy-to-understand manner. In data analysis, data visualization plays an important role in exploring and understanding data, identifying patterns and relationships, and communicating insights to stakeholders. # Python has a variety of libraries for data visualization, including Matplotlib, Seaborn, Plotly, and Bokeh, among others. These libraries provide a wide range of visualization techniques, from simple bar charts and line graphs to more complex heatmaps and interactive visualizations. # **Data visualization is important for data analysts for several reasons:** # * It helps to identify patterns and trends that may not be apparent from raw data. # * It provides a way to communicate insights and findings to stakeholders in a clear and concise manner. # * It enables data analysts to explore large datasets and extract valuable insights quickly. # * It allows for the comparison of different datasets and variables, which can aid in decision-making. # A use case for data visualization in data analysis could be to explore a large dataset of customer transactions and identify patterns in customer behavior. By visualizing the data with techniques such as scatterplots or heatmaps, analysts can quickly identify trends in customer spending habits, such as peak times for purchases or popular product categories. This information can then be used to inform marketing strategies or optimize product offerings. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # we will mainly focus on the matplotlib library for visualization in this notebook import matplotlib.pyplot as plt
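Since the notebook says it will mainly use Matplotlib, here is a short, self-contained example of the kind of chart the introduction above describes, on made-up monthly purchase counts (the figures are invented for illustration only).

```python
import matplotlib.pyplot as plt

# Made-up monthly purchase counts for illustration only.
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"]
purchases = [120, 135, 160, 150, 180, 210]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))

# Bar chart: compare categories (months) side by side.
ax1.bar(months, purchases, color="steelblue")
ax1.set_title("Purchases per month")
ax1.set_ylabel("Number of purchases")

# Line plot: emphasize the trend over time.
ax2.plot(months, purchases, marker="o", color="darkorange")
ax2.set_title("Purchase trend")

plt.tight_layout()
plt.show()
```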
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/189/129189536.ipynb
null
null
[{"Id": 129189536, "ScriptId": 38406898, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8032988, "CreationDate": "05/11/2023 16:55:56", "VersionNumber": 1.0, "Title": "Data_Visualization", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 36.0, "LinesInsertedFromPrevious": 36.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
# ## Data Visualization # is the graphical representation of data that helps to communicate complex information in an easy-to-understand manner. In data analysis, data visualization plays an important role in exploring and understanding data, identifying patterns and relationships, and communicating insights to stakeholders. # Python has a variety of libraries for data visualization, including Matplotlib, Seaborn, Plotly, and Bokeh, among others. These libraries provide a wide range of visualization techniques, from simple bar charts and line graphs to more complex heatmaps and interactive visualizations. # **Data visualization is important for data analysts for several reasons:** # * It helps to identify patterns and trends that may not be apparent from raw data. # * It provides a way to communicate insights and findings to stakeholders in a clear and concise manner. # * It enables data analysts to explore large datasets and extract valuable insights quickly. # * It allows for the comparison of different datasets and variables, which can aid in decision-making. # A use case for data visualization in data analysis could be to explore a large dataset of customer transactions and identify patterns in customer behavior. By visualizing the data with techniques such as scatterplots or heatmaps, analysts can quickly identify trends in customer spending habits, such as peak times for purchases or popular product categories. This information can then be used to inform marketing strategies or optimize product offerings. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # we will mainly focus on the matplotlib library for visualization in this notebook import matplotlib.pyplot as plt
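The use case above also mentions scatterplots and heatmaps for spotting patterns in customer behaviour. A self-contained sketch of both, on randomly generated data (not a real transaction dataset), could look like this:

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)

# Synthetic data: purchase amount loosely correlated with customer age.
age = rng.uniform(18, 70, 200)
amount = 20 + 1.5 * age + rng.normal(0, 15, 200)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))

# Scatterplot: each point is one transaction.
ax1.scatter(age, amount, alpha=0.6)
ax1.set_xlabel("Customer age")
ax1.set_ylabel("Purchase amount")
ax1.set_title("Spending vs. age")

# Heatmap: purchases binned by weekday and hour of day (random counts here).
counts = rng.integers(0, 50, size=(7, 24))
im = ax2.imshow(counts, aspect="auto", cmap="viridis")
ax2.set_xlabel("Hour of day")
ax2.set_ylabel("Weekday (0=Mon)")
ax2.set_title("Purchases by hour and weekday")
fig.colorbar(im, ax=ax2)

plt.tight_layout()
plt.show()
```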
false
0
504
1
504
504
129189516
# # [Clean] Collaborative Filtering Deep Dive from Jeremy # ## A First Look at the Data from fastai.collab import * from fastai.tabular.all import * path = untar_data(URLs.ML_100k) list(path.ls()) ratings = pd.read_csv( path / "u.data", delimiter="\t", header=None, names=["user", "movie", "rating", "timestamp"], ) ratings.head() last_skywalker = np.array([0.98, 0.9, -0.9]) user1 = np.array([0.9, 0.8, -0.6]) (last_skywalker * user1).sum(), last_skywalker @ user1.squeeze() casablanca = np.array([-0.99, -0.3, 0.8]) (user1 * casablanca).sum(), casablanca @ user1.squeeze() # ## Learning the Latent Factors # ## Creating the DataLoaders movies = pd.read_csv( path / "u.item", delimiter="|", encoding="latin-1", usecols=(0, 1), names=["movie", "title"], header=None, ) movies.head() ratings = ratings.merge(movies) ratings.head() dls = CollabDataLoaders.from_df(ratings, item_name="title", bs=64) dls.show_batch() dls.classes n_users = len(dls.classes["user"]) n_movies = len(dls.classes["title"]) n_factors = 5 user_factors = torch.randn(n_users, n_factors) movie_factors = torch.randn(n_movies, n_factors) one_hot_3 = one_hot(3, n_users).float() # 944x1 @ 994 x 5 ~ 5 x 994 @ 944 x 1 -> 5 x 1 user_factors.t() @ one_hot_3 user_factors.shape, one_hot_3.shape, user_factors.t().shape class Example: def __init__(self, a): self.a = a def say(self, x): return f"Hello {self.a}, {x}." ex = Example("Sylvain") ex.say("nice to meet you") # ## Collaborative Filtering from Scratch # ### Model: (users * movies).sum(dim=1) class DotProduct(Module): def __init__(self, n_users, n_movies, n_factors): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) def forward(self, x): # x 应该是数据索引 shape = 64 x 2 users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) return (users * movies).sum(dim=1) x, y = dls.one_batch() x.shape, y.shape model = DotProduct(n_users, n_movies, 50) learn = Learner(dls, model, loss_func=MSELossFlat()) learn.fit_one_cycle(5, 5e-3) # ### Model: limit Ratings with sigmoid_range class DotProduct2(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) self.y_range = y_range def forward(self, x): users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) return sigmoid_range((users * movies).sum(dim=1), *self.y_range) model2 = DotProduct2(n_users, n_movies, 50) learner2 = Learner(dls, model2, loss_func=MSELossFlat()) learner2.fit_one_cycle(5, 5e-3) # ### Model: Add bias to users and movies class DotProduct3(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) self.y_range = y_range self.user_bias = Embedding(n_users, 1) self.movie_bias = Embedding(n_movies, 1) def forward(self, x): users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) y_range = self.y_range u_bias = self.user_bias(x[:, 0]) m_bias = self.movie_bias(x[:, 1]) acc = (users * movies).sum(dim=1, keepdim=True) acc = acc + u_bias + m_bias return sigmoid_range(acc, *y_range) model3 = DotProduct3(n_users, n_movies, 50) learn3 = Learner(dls, model3, loss_func=MSELossFlat()) learn3.fit_one_cycle(5, 5e-3) # Overfit!!!! 
at epoch 3 # ### Weight Decay # ``` # loss_with_decay = loss + weight_decay * (parameters ** 2).sum() # loss_with_decay.grad += weight_decay * (2 * parameters) # ``` model4 = DotProduct3(n_users, n_movies, 50) learn4 = Learner(dls, model4, loss_func=MSELossFlat()) learn4.fit_one_cycle(5, 5e-3, wd=0.1) # > Overfit fixed! model5 = DotProduct3(n_users, n_movies, 50) learn5 = Learner(dls, model5, loss_func=MSELossFlat()) learn5.fit_one_cycle(5, 5e-3, wd=0.01) # > Overfit not fixed! # ### Creating Our Own Embedding Module class T(Module): def __init__(self): self.a = torch.ones(3) L(T().parameters()) class T(Module): def __init__(self): self.a = nn.Parameter(torch.ones(3)) L(T().parameters()) class T(Module): def __init__(self): self.a = nn.Linear(1, 3, bias=False) L(T().parameters()) type(T().a.weight) def create_params(size): return nn.Parameter(torch.zeros(*size).normal_(0, 0.01)) create_params((1, 3)) class DotProduct6(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = create_params([n_users, n_factors]) self.user_bias = create_params([n_users]) self.movie_factors = create_params([n_movies, n_factors]) self.movie_bias = create_params([n_movies]) self.y_range = y_range def forward(self, x): users = self.user_factors[x[:, 0]] movies = self.movie_factors[x[:, 1]] res = (users * movies).sum(dim=1) res += self.user_bias[x[:, 0]] + self.movie_bias[x[:, 1]] return sigmoid_range(res, *self.y_range) model6 = DotProduct6(n_users, n_movies, 50) learn6 = Learner(dls, model6, loss_func=MSELossFlat()) learn6.fit_one_cycle(5, 5e-3, wd=0.1) # ## Interpreting Embeddings and Bias learn6.model.movie_bias.shape movie_bias = learn6.model.movie_bias.squeeze() print(movie_bias.sort()) idxs = movie_bias.argsort()[:5] l = dls.classes["title"][idxs] np.array(l)[:, None] idxs = movie_bias.argsort(descending=True)[:5] [dls.classes["title"][i] for i in idxs] # hide_input # id img_pca_movie # caption Representation of movies based on two strongest PCA components # alt Representation of movies based on two strongest PCA components g = ratings.groupby("title")["rating"].count() top_movies = g.sort_values(ascending=False).index.values[:1000] top_idxs = tensor([learn6.dls.classes["title"].o2i[m] for m in top_movies]) movie_w = learn6.model.movie_factors[top_idxs].cpu().detach() movie_pca = movie_w.pca(3) fac0, fac1, fac2 = movie_pca.t() idxs = list(range(50)) X = fac0[idxs] Y = fac2[idxs] plt.figure(figsize=(12, 12)) plt.scatter(X, Y) for i, x, y in zip(top_movies[idxs], X, Y): plt.text(x, y, i, color=np.random.rand(3) * 0.7, fontsize=11) plt.show() from sklearn.cluster import KMeans k = 10 kmeans = KMeans(n_clusters=k, random_state=0).fit(movie_w) labels = kmeans.labels_ centers = kmeans.cluster_centers_ labels[:10] top_movies = g.sort_values(ascending=False).index.values[:1000] top_movies[:5] for i in range(k): cluster_movies = top_movies[labels == i] center = centers[i] most_representative_movie = top_movies[(movie_w @ center).argmax()] print( f"Cluster {i + 1}: {most_representative_movie} ({len(cluster_movies)} movies: {cluster_movies[:5]})" ) # ### Using fastai.collab learn7 = collab_learner(dls, n_factors=50, y_range=(0, 5.5)) learn7.fit_one_cycle(5, 5e-3, wd=0.1) learn7.model movie_bias = learn7.model.i_bias.weight.squeeze() top_idxs = movie_bias.argsort(descending=True)[:5] dls.classes["title"][top_idxs] # ### Embedding Distance movie_factors = learn7.model.i_weight.weight silence_idx = dls.classes["title"].o2i["Silence of the Lambs, The (1991)"] distances = 
nn.CosineSimilarity(dim=1)(movie_factors, movie_factors[silence_idx][None]) neighbors_idxs = distances.argsort(descending=True)[:10] list(dls.classes["title"][neighbors_idxs]) # ## Bootstrapping a Collaborative Filtering Model # ## Deep Learning for Collaborative Filtering embs = get_emb_sz(dls) embs class CollabNN(Module): def __init__(self, user_sz, item_sz, y_range=(0, 5.5), n_act=100): self.user_factors = Embedding(*user_sz) self.item_factors = Embedding(*item_sz) self.y_range = y_range self.layers = nn.Sequential( nn.Linear(user_sz[1] + item_sz[1], n_act), nn.ReLU(), nn.Linear(n_act, 1) ) def forward(self, x): embs = self.user_factors(x[:, 0]), self.item_factors(x[:, 1]) x = self.layers(torch.cat(embs, dim=1)) return sigmoid_range(x, *self.y_range) model8 = CollabNN(*embs) learn8 = Learner(dls, model8, loss_func=MSELossFlat()) learn8.fit_one_cycle(5, 5e-3, wd=0.01) model9 = CollabNN(*embs) learn9 = Learner(dls, model9, loss_func=MSELossFlat()) learn9.fit_one_cycle(5, 5e-3, wd=0.1) learn10 = collab_learner(dls, use_nn=True, y_range=(0, 5.5), layers=[100, 50]) learn10.fit_one_cycle(5, 5e-3, wd=0.1) learn11 = collab_learner(dls, use_nn=True, y_range=(0, 5.5), layers=[100, 50]) learn11.lr_find() learn11.fit_one_cycle(6, 2e-3, wd=0.1)
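The models above repeatedly call fastai's `sigmoid_range` to squash raw dot-product scores into the (0, 5.5) rating range. The snippet below is my own minimal re-implementation of that idea in plain PyTorch, shown only to make the transformation explicit; it is not fastai's source.

```python
import torch

def sigmoid_range_like(x, low, high):
    # Squash arbitrary real-valued scores into the open interval (low, high).
    return torch.sigmoid(x) * (high - low) + low

scores = torch.tensor([-5.0, 0.0, 5.0])
print(sigmoid_range_like(scores, 0, 5.5))
# Approximately tensor([0.0368, 2.7500, 5.4632]): raw scores mapped into the rating range.
```

Clamping predictions this way is what lets the dot-product models train stably, since the loss never has to push raw scores toward impossible ratings outside the valid range.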
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/189/129189516.ipynb
null
null
[{"Id": 129189516, "ScriptId": 38407414, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14304196, "CreationDate": "05/11/2023 16:55:38", "VersionNumber": 1.0, "Title": "[Clean] Collaborative Filtering Deep Dive from Jer", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 310.0, "LinesInsertedFromPrevious": 310.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
# # [Clean] Collaborative Filtering Deep Dive from Jeremy # ## A First Look at the Data from fastai.collab import * from fastai.tabular.all import * path = untar_data(URLs.ML_100k) list(path.ls()) ratings = pd.read_csv( path / "u.data", delimiter="\t", header=None, names=["user", "movie", "rating", "timestamp"], ) ratings.head() last_skywalker = np.array([0.98, 0.9, -0.9]) user1 = np.array([0.9, 0.8, -0.6]) (last_skywalker * user1).sum(), last_skywalker @ user1.squeeze() casablanca = np.array([-0.99, -0.3, 0.8]) (user1 * casablanca).sum(), casablanca @ user1.squeeze() # ## Learning the Latent Factors # ## Creating the DataLoaders movies = pd.read_csv( path / "u.item", delimiter="|", encoding="latin-1", usecols=(0, 1), names=["movie", "title"], header=None, ) movies.head() ratings = ratings.merge(movies) ratings.head() dls = CollabDataLoaders.from_df(ratings, item_name="title", bs=64) dls.show_batch() dls.classes n_users = len(dls.classes["user"]) n_movies = len(dls.classes["title"]) n_factors = 5 user_factors = torch.randn(n_users, n_factors) movie_factors = torch.randn(n_movies, n_factors) one_hot_3 = one_hot(3, n_users).float() # 944x1 @ 994 x 5 ~ 5 x 994 @ 944 x 1 -> 5 x 1 user_factors.t() @ one_hot_3 user_factors.shape, one_hot_3.shape, user_factors.t().shape class Example: def __init__(self, a): self.a = a def say(self, x): return f"Hello {self.a}, {x}." ex = Example("Sylvain") ex.say("nice to meet you") # ## Collaborative Filtering from Scratch # ### Model: (users * movies).sum(dim=1) class DotProduct(Module): def __init__(self, n_users, n_movies, n_factors): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) def forward(self, x): # x 应该是数据索引 shape = 64 x 2 users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) return (users * movies).sum(dim=1) x, y = dls.one_batch() x.shape, y.shape model = DotProduct(n_users, n_movies, 50) learn = Learner(dls, model, loss_func=MSELossFlat()) learn.fit_one_cycle(5, 5e-3) # ### Model: limit Ratings with sigmoid_range class DotProduct2(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) self.y_range = y_range def forward(self, x): users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) return sigmoid_range((users * movies).sum(dim=1), *self.y_range) model2 = DotProduct2(n_users, n_movies, 50) learner2 = Learner(dls, model2, loss_func=MSELossFlat()) learner2.fit_one_cycle(5, 5e-3) # ### Model: Add bias to users and movies class DotProduct3(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = Embedding(n_users, n_factors) self.movie_factors = Embedding(n_movies, n_factors) self.y_range = y_range self.user_bias = Embedding(n_users, 1) self.movie_bias = Embedding(n_movies, 1) def forward(self, x): users = self.user_factors(x[:, 0]) movies = self.movie_factors(x[:, 1]) y_range = self.y_range u_bias = self.user_bias(x[:, 0]) m_bias = self.movie_bias(x[:, 1]) acc = (users * movies).sum(dim=1, keepdim=True) acc = acc + u_bias + m_bias return sigmoid_range(acc, *y_range) model3 = DotProduct3(n_users, n_movies, 50) learn3 = Learner(dls, model3, loss_func=MSELossFlat()) learn3.fit_one_cycle(5, 5e-3) # Overfit!!!! 
at epoch 3 # ### Weight Decay # ``` # loss_with_decay = loss + weight_decay * (parameters ** 2).sum() # loss_with_decay.grad += weight_decay * (2 * parameters) # ``` model4 = DotProduct3(n_users, n_movies, 50) learn4 = Learner(dls, model4, loss_func=MSELossFlat()) learn4.fit_one_cycle(5, 5e-3, wd=0.1) # > Overfit fixed! model5 = DotProduct3(n_users, n_movies, 50) learn5 = Learner(dls, model5, loss_func=MSELossFlat()) learn5.fit_one_cycle(5, 5e-3, wd=0.01) # > Overfit not fixed! # ### Creating Our Own Embedding Module class T(Module): def __init__(self): self.a = torch.ones(3) L(T().parameters()) class T(Module): def __init__(self): self.a = nn.Parameter(torch.ones(3)) L(T().parameters()) class T(Module): def __init__(self): self.a = nn.Linear(1, 3, bias=False) L(T().parameters()) type(T().a.weight) def create_params(size): return nn.Parameter(torch.zeros(*size).normal_(0, 0.01)) create_params((1, 3)) class DotProduct6(Module): def __init__(self, n_users, n_movies, n_factors, y_range=(0, 5.5)): self.user_factors = create_params([n_users, n_factors]) self.user_bias = create_params([n_users]) self.movie_factors = create_params([n_movies, n_factors]) self.movie_bias = create_params([n_movies]) self.y_range = y_range def forward(self, x): users = self.user_factors[x[:, 0]] movies = self.movie_factors[x[:, 1]] res = (users * movies).sum(dim=1) res += self.user_bias[x[:, 0]] + self.movie_bias[x[:, 1]] return sigmoid_range(res, *self.y_range) model6 = DotProduct6(n_users, n_movies, 50) learn6 = Learner(dls, model6, loss_func=MSELossFlat()) learn6.fit_one_cycle(5, 5e-3, wd=0.1) # ## Interpreting Embeddings and Bias learn6.model.movie_bias.shape movie_bias = learn6.model.movie_bias.squeeze() print(movie_bias.sort()) idxs = movie_bias.argsort()[:5] l = dls.classes["title"][idxs] np.array(l)[:, None] idxs = movie_bias.argsort(descending=True)[:5] [dls.classes["title"][i] for i in idxs] # hide_input # id img_pca_movie # caption Representation of movies based on two strongest PCA components # alt Representation of movies based on two strongest PCA components g = ratings.groupby("title")["rating"].count() top_movies = g.sort_values(ascending=False).index.values[:1000] top_idxs = tensor([learn6.dls.classes["title"].o2i[m] for m in top_movies]) movie_w = learn6.model.movie_factors[top_idxs].cpu().detach() movie_pca = movie_w.pca(3) fac0, fac1, fac2 = movie_pca.t() idxs = list(range(50)) X = fac0[idxs] Y = fac2[idxs] plt.figure(figsize=(12, 12)) plt.scatter(X, Y) for i, x, y in zip(top_movies[idxs], X, Y): plt.text(x, y, i, color=np.random.rand(3) * 0.7, fontsize=11) plt.show() from sklearn.cluster import KMeans k = 10 kmeans = KMeans(n_clusters=k, random_state=0).fit(movie_w) labels = kmeans.labels_ centers = kmeans.cluster_centers_ labels[:10] top_movies = g.sort_values(ascending=False).index.values[:1000] top_movies[:5] for i in range(k): cluster_movies = top_movies[labels == i] center = centers[i] most_representative_movie = top_movies[(movie_w @ center).argmax()] print( f"Cluster {i + 1}: {most_representative_movie} ({len(cluster_movies)} movies: {cluster_movies[:5]})" ) # ### Using fastai.collab learn7 = collab_learner(dls, n_factors=50, y_range=(0, 5.5)) learn7.fit_one_cycle(5, 5e-3, wd=0.1) learn7.model movie_bias = learn7.model.i_bias.weight.squeeze() top_idxs = movie_bias.argsort(descending=True)[:5] dls.classes["title"][top_idxs] # ### Embedding Distance movie_factors = learn7.model.i_weight.weight silence_idx = dls.classes["title"].o2i["Silence of the Lambs, The (1991)"] distances = 
nn.CosineSimilarity(dim=1)(movie_factors, movie_factors[silence_idx][None]) neighbors_idxs = distances.argsort(descending=True)[:10] list(dls.classes["title"][neighbors_idxs]) # ## Bootstrapping a Collaborative Filtering Model # ## Deep Learning for Collaborative Filtering embs = get_emb_sz(dls) embs class CollabNN(Module): def __init__(self, user_sz, item_sz, y_range=(0, 5.5), n_act=100): self.user_factors = Embedding(*user_sz) self.item_factors = Embedding(*item_sz) self.y_range = y_range self.layers = nn.Sequential( nn.Linear(user_sz[1] + item_sz[1], n_act), nn.ReLU(), nn.Linear(n_act, 1) ) def forward(self, x): embs = self.user_factors(x[:, 0]), self.item_factors(x[:, 1]) x = self.layers(torch.cat(embs, dim=1)) return sigmoid_range(x, *self.y_range) model8 = CollabNN(*embs) learn8 = Learner(dls, model8, loss_func=MSELossFlat()) learn8.fit_one_cycle(5, 5e-3, wd=0.01) model9 = CollabNN(*embs) learn9 = Learner(dls, model9, loss_func=MSELossFlat()) learn9.fit_one_cycle(5, 5e-3, wd=0.1) learn10 = collab_learner(dls, use_nn=True, y_range=(0, 5.5), layers=[100, 50]) learn10.fit_one_cycle(5, 5e-3, wd=0.1) learn11 = collab_learner(dls, use_nn=True, y_range=(0, 5.5), layers=[100, 50]) learn11.lr_find() learn11.fit_one_cycle(6, 2e-3, wd=0.1)
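As a companion to the embedding-distance cell at the end of the script above, here is a small self-contained PyTorch sketch of ranking nearest neighbours by cosine similarity. The embedding table and query index are random stand-ins for the learned movie factors, not values from the notebook.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)

# Synthetic stand-in for a learned (n_items x n_factors) embedding table.
item_factors = torch.randn(100, 50)
query_idx = 7

# Cosine similarity between the query item's vector and every row of the table.
sims = F.cosine_similarity(item_factors, item_factors[query_idx][None], dim=1)

# The query itself scores 1.0, so skip position 0 of the ranking.
nearest = sims.argsort(descending=True)[1:6]
print("Nearest neighbours of item", query_idx, ":", nearest.tolist())
```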
false
0
3,307
1
3,307
3,307
129084132
<jupyter_start><jupyter_text>California Independent Medical Review Dataset This is a good starter dataset for NLP enthusiasts. ## Content This data is from the California Department of Managed Health Care (DMHC). It contains all decisions from Independent Medical Reviews (IMR) administered by the DMHC since January 1, 2001. An IMR is an independent review of a denied, delayed, or modified health care service that the health plan has determined to be not medically necessary, experimental/investigational or non-emergent/urgent. If the IMR is decided in an enrollee's favor, the health plan must authorize the service or treatment requested. Image Credit - [Moondance](https://pixabay.com/users/elf-moondance-19728901/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5947297) from [Pixabay](https://pixabay.com/illustrations/doctor-nurse-patient-coronavirus-5947297/) Kaggle dataset identifier: ca-independent-medical-review <jupyter_script># # Observations on the California Independent Medical Review Dataset # This is the [California Independent Medical Review (IMR) dataset](https://www.kaggle.com/datasets/prasad22/ca-independent-medical-review) published by Prasad Patil on Kaggle. It contains every health insurance coverage appeal (or "IMR") that was requested in California between 2001 and 2016 (19,245 IMRs). # Some observations and statistics were extracted for hepatitis, gender, autism, and short stature treatments. # This also incorporates [our script that fills in missing values for age and gender](https://www.kaggle.com/code/protobioengineering/california-imr-improved-age-gender-columns/). # ## Table of Contents # 1. Imports and Data Cleaning # 2. Observations # 1. 58% of IMRs were requested for female patients # 2. 61% of Data from 2001-2003 is Missing Age and Gender # 3. Orthopedic Issues Are the Most Common Condition Needing an IMR (18%) # 4. Hepatitis Prompts a Disproportionate Number of IMRs # 5. The Top Diagnostic Categories by Gender # 6. Males Have More Autism Spectrum-Related IMR Requests # 7. Women Had Every OB-Gyn Request, Except for 1 Male Patient # 8. No female patients were listed under typically "male" diagnostic categories # 9. 3 Times More IMRs for Short Stature Treatment are Made for Boys than Girls # 10. Short Stature Treatment Appeals are Highest During Puberty Age # ## Imports and Data Cleaning import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) imr_df = pd.read_csv( "/kaggle/input/ca-independent-medical-review/Independent_Medical_Review__IMR__Determinations__Trend.csv" ) imr_df["Report Year"] = imr_df["Report Year"].astype(int) imr_df # ### Data Cleaning # This extracts the gender and age from the `Findings` column into two new columns, `Extracted Gender` and `Extracted Age`. 1210 observations from 2001-2003 were missing age and gender previously. import re def extract_gender(row): """ If the first 20 words of the "Findings" column contain a gendered word to describe the patient, then assign them that gender. 
Male keywords = 'his' or 'male' Female keywords = 'her' or 'female' """ gender = None if row["Patient Gender"] == "Male" or row["Patient Gender"] == "Female": gender = row["Patient Gender"] elif str(row["Patient Gender"]).lower() == "nan": first_20_words = row["Findings"].split(" ")[:20] if "his" in first_20_words or "male" in first_20_words: gender = "Male" elif "her" in first_20_words or "female" in first_20_words: gender = "Female" return gender def extract_age(row): """ If the first 20 words of the "Findings" column contain anything that looks like "XX-year-old" with XX being an age/number, make that the age. """ if type(row["Findings"]) != str: return row["Findings"] else: first_20_words = row["Findings"].split(" ")[:20] age = "None" for word in first_20_words: temp_age = re.search(r"(\d{1,3})[\- ]year", word, re.IGNORECASE) if temp_age is not None: age = int(temp_age.group(1)) return age def calculate_age_range(row): """ Uses the age ("Extracted Age") that was extracted from "Findings" to repopulate the 1210 missing values in the dataset's original "Age Range" column. """ age_range = row["Age Range"] extracted_age = row["Extracted Age"] if age_range != str: if extracted_age >= 0 and extracted_age <= 10: age_range = "0-10" elif extracted_age >= 11 and extracted_age <= 20: age_range = "11-20" elif extracted_age >= 21 and extracted_age <= 30: age_range = "21-30" elif extracted_age >= 31 and extracted_age <= 40: age_range = "31-40" elif extracted_age >= 41 and extracted_age <= 50: age_range = "41-50" elif extracted_age >= 51 and extracted_age <= 64: age_range = "51-64" elif extracted_age >= 65: age_range = "65+" return age_range def fix_age_range_typo(row): """ One of the values in the "Age Range" column has a typo. Every value that should be "11-20" has the typo of "11_20" (an underscore). """ age_range = row["Age Range"] if age_range == "11_20": age_range = "11-20" return age_range # Extract gender and age values into new columns imr_df["Extracted Gender"] = imr_df.apply(extract_gender, axis=1) imr_df["Extracted Age"] = imr_df.apply(extract_age, axis=1) # Recalculate original age range column's NaN values from new `Extracted Age` imr_df["Age Range"] = imr_df.apply(calculate_age_range, axis=1) # Fix a typo in the "11-20" age range category imr_df["Age Range"] = imr_df.apply(fix_age_range_typo, axis=1) # ## Observations # ### 58% of IMRs were requested for female patients # * **58%** of IMR requests were for **female** patients # * **42%** of IMR requests were for **male** patients # * 0.05% of the IMRs do not have any gender documented. female_reviews = imr_df["Extracted Gender"].value_counts(dropna=False)["Female"] male_reviews = imr_df["Extracted Gender"].value_counts(dropna=False)["Male"] unknown_gender_reviews = imr_df["Extracted Gender"].isna().sum() total_reviews = len(imr_df["Extracted Gender"]) print(f"IMR reviews for females: {female_reviews}") print(f"IMR reviews for males: {male_reviews}") print(f"IMR reviews for unknown gender: {unknown_gender_reviews}") print(f"Total IMR reviews for: {total_reviews}\n") print(f"Female percentage of IMRs: {female_reviews / total_reviews * 100}") print(f"Male percentage of IMRs: {male_reviews / total_reviews * 100}") print( f"'Unknown gender' percentage of IMRs: {unknown_gender_reviews / total_reviews * 100}" ) # ### Follow-up questions: # * Why are more female patients requesting IMRs? # * Do certain conditions experienced by females have poorer insurance coverage or tend to get denied coverage outright? 
# * Are males less likely to request IMRs? # * Could both groups be equally denied coverage, yet males do not follow up on appealing to get coverage? # ### Potential Resources for Questions # Men are less likely than women to see their primary care providers, according to a 2013 Canadian survey. # * https://bmcprimcare.biomedcentral.com/articles/10.1186/s12875-016-0440-0 # It may be that men are as equally likely as women to both get denied coverage and to request an IMR, but perhaps male patients' requests are lower overall, since they are less likely to become patients in the first place. However, this cannot be determined from this dataset. # ## 61% of Data from 2001-2003 is Missing Age and Gender # Prior to using the age and gender extraction script above, the oldest data (2001-2003) was missing age and gender for 1210 IMRs. None of the IMRs in 2001 listed a "Male" gender for patients, though there are some for female patients. All observations for 2004-2016 have a "Male" or "Female" patient gender. # This means that the **original data collection process** used by the California Department of Insurance **may not have been as robust or precise when it was first implemented**. This should be noted when analyzing other data from 2001-2003. years_missing_gender = imr_df[imr_df["Patient Gender"].isna()]["Report Year"].unique() years_with_female = imr_df[imr_df["Patient Gender"] == "Female"]["Report Year"].unique() years_with_male = imr_df[imr_df["Patient Gender"] == "Male"]["Report Year"].unique() total_missing_gender = imr_df["Patient Gender"].isna().sum() total_rows_2001_to_2003 = imr_df[imr_df["Report Year"].isin([2001, 2002, 2003])] percentage_missing_gender = total_missing_gender / len(total_rows_2001_to_2003) * 100 years_missing_gender = np.sort(years_missing_gender) years_with_female = np.sort(years_with_female) years_with_male = np.sort(years_with_male) print(f"Years with some gender data missing: {years_missing_gender}") print(f"Years with female gender data: {years_with_female}") print(f"Years with male gender data: {years_with_male}") print(f"Total rows missing gender: {total_missing_gender}") print( f"Percentage of rows in 2001-2003 missing age/gender: {percentage_missing_gender}" ) # ## Orthopedic Issues Are the Most Common Condition Needing an IMR at 18% # IMRs for **orthopedic and musculoskeletal health issues** are the most commonly requested, making up **18%** of all IMRs in California. Following that are **mental health issues (13%)**, **cancer (9%)** and **central nervous system/neuromuscular issues (8%)**. imr_df["Diagnosis Category"].value_counts().head(10) total_imr = len(imr_df) top_10_diag = ( imr_df["Diagnosis Category"].value_counts().head(10).reset_index(name="Count") ) top_10_diag["Percentage"] = top_10_diag["Count"] / total_imr * 100 top_10_diag.columns = ["Diagnosis Category", "Count", "Percentage"] top_10_diag # ## Hepatitis Prompts a Disproportionate Number of IMRs # Hepatitis of any kind (A, B, C, etc.) is [acquired by Californians at a rate of about 0.1% per year](https://www.cdph.ca.gov/Programs/CID/DCDC/Pages/ViralHepatitisData.aspx). However, hepatitis made up 4.1% of IMR requests. # Each year in California, people are infected with chronic hepatitis B and C at a rate of 104 per 100,000 residents, or 0.1% of the population. 
(This was calculated by adding the [yearly infection rate for both hepatitis B (24.8 per 100k residents) and C (79.5 per 100k residents) in California between 2012 and 2016](https://www.cdph.ca.gov/Programs/CID/DCDC/Pages/ViralHepatitisData.aspx).) In addition, the percentage of California residents with hepatitis C in 2013-2016, no matter when infection occurred, was [1,080 per 100,000 residents (1.1%)](https://hepvu.org/local-data/california/). Considering that chronic hepatitis B acquisition happens at 31% the rate that hepatitis C does, we can assume that roughly 1.5% of the population in California had the two most common forms of hepatitis (B and C) in 2016 (with the third most common type, A, infecting [only 0.005% of all Americans per year](https://www.cdc.gov/vaccines/pubs/pinkbook/hepa.html)). # For hepatitis to be present in only 1.5% of Californians yet prompt 4.1% of appeals to health insurance denials (IMRs) is notable. Contrast that with the country-wide prevalence of [backpain, which is around 8% for all Americans](https://hpi.georgetown.edu/backpain/), yet prompts fewer IMRs, as shown below. imr_df["Diagnosis Sub Category"].value_counts().head(10) hepatitis_count = imr_df[imr_df["Diagnosis Sub Category"] == "Hepatitis"].shape[0] hepatitis_percentage = hepatitis_count / len(imr_df) * 100 print(f"Number of IMRs for hepatitis: {hepatitis_count}") print(f"Percentage of IMRs for hepatitis: {hepatitis_percentage}") # ### Additional data on Hepatitis # Hepatitis C has [recently become curable in 95% of people as of 2013](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4231565/) by using sofosbuvir (Sovaldi). Sofosbuvir was discovered in 2007, then was FDA-approved for curing hepatitis C in 2013. Previous antiviral treatments only had a 50% success rate. # ### Follow-up questions # * Do the treatments and findings for hepatitis IMRs reflect the lack of availability of effective and affordable treatments up until recent years? # * Did any hepatitis cases have IMRs listed as "Experimental/Investigative," and did any of these happen to use sofosbuvir? If so, did these IMRs get an "Overturned" decision? Did this change over time? # * Did the need for IMRs for hepatitis C patients drop after sofosbuvir became available in 2013? # ## The Top Diagnostic Categories by Gender # The top 4 categories of conditions are roughly same between male and female patients. Of course, female patients experience OB-Gyn health issues at a rate that male patients do not. female_reviews_only = imr_df[imr_df["Patient Gender"] == "Female"] male_reviews_only = imr_df[imr_df["Patient Gender"] == "Male"] unknown_gender_reviews_only = imr_df[imr_df["Patient Gender"].isna()] female_reviews_only["Diagnosis Category"].value_counts().head(5) male_reviews_only["Diagnosis Category"].value_counts().head(5) # ## Males Have More Autism Spectrum-Related IMR Requests # Male patients have 4 times as many IMR requests that list "Autism Spectrum" as the diagnostic category (95 vs. 429). However, [research over the past decade](https://pubmed.ncbi.nlm.nih.gov/27899710/) has shown that female autism presents differently and is often misdiagnosed. Female patients seeking insurance coverage for autism spectrum treatments may have been lumped in with the "Mental" diagnosis category along with similar conditions that it is misdiagnosed as, like OCD, ADHD, and Bipolar Disorder. 
female_asc = female_reviews_only["Diagnosis Category"].value_counts()["Autism Spectrum"] male_asc = male_reviews_only["Diagnosis Category"].value_counts()["Autism Spectrum"] print(f"Female IMRs for Autism Spectrum treatments: {female_asc}") print(f"Male IMRs for Autism Spectrum treatments: {male_asc}") # ### Follow-up questions: # * Does the prevalence of IMR requests for autism spectrum conditions track with the society-wide increase in diagnoses of autism, especially for female patients? # * Is coverage for males or females with autism more or less likely to be denied? # * What was the percentage of "Mental" requests for female patients? And how many of these were for disorders commonly mistaken for autism in females (OCD, Bipolar, ADHD, etc.)? # ## Women Had Every OB-Gyn Request, Except for 1 Male Patient # Women had virtually every OB-Gyn IMR, which is not a surprise. However, one male patient is listed as requesting an IMR for an OB-Gyn concern. # The male patient with the OB-Gyn concern was seeking tomosynthesis, which is a 3D mammography technique used to investigate breast cancer. He was listed as having a "Female Breast Disorder", since California's dataset unfortunately does not have a category for male breast cancer, and the female categorization was likely the most accurate for research purposes. 1 in 833 men (or 0.12%) experience breast cancer in their lifetimes in the United States. This is more than the prevalence of many other conditions that got their own categories. # * https://www.bcrf.org/blog/male-breast-cancer-statistics-research/ # ### Follow-up thoughts # The datasets and its categories seem to reflect the genderedness of everyday people's perception of diseases (e.g. autism in females, male breast cancer). # ### Follow-up questions # * Which other diseases seem to require more IMRs due to gender- and sex-based assumptions? # * Are people with diseases that don't fit society's expectations for their sex/gender having to inordinately request IMRs? Are these requests getting upheld or overturned? female_obgyn = female_reviews_only["Diagnosis Category"].value_counts()[ "OB-Gyn/ Pregnancy" ] male_obgyn = male_reviews_only["Diagnosis Category"].value_counts()["OB-Gyn/ Pregnancy"] print(f"Female IMRs for OB-Gyn treatments: {female_obgyn}") print(f"Male IMRs for OB-Gyn Spectrum treatments: {male_obgyn}") male_reviews_only.loc[male_reviews_only["Diagnosis Category"] == "OB-Gyn/ Pregnancy"] # ### Reasoning for the male patient being listed under OB-GYN # The male patient was listed under OB-GYN, because he needed a special diagnostic procedure for breast cancer. It is not clear why he was listed under OB-GYN when cancer has its own diagnostic category and the physicians listed in the description below were board-certified in radiology rather than OB-GYN. details = male_reviews_only.loc[ male_reviews_only["Diagnosis Category"] == "OB-Gyn/ Pregnancy" ]["Findings"] print(details.iloc[0]) # ## No female patients were listed under the typically "male" diagnostic category of prostate cancer # Though one male was listed under a "female" category (`OB-Gyn`), no female patients had any conditions listed under the "prostate cancer" sub category. This is statistically expected, but the previous male patient's breast cancer issue being listed under OB-Gyn was not obvious, so this was done for sanity. Though a woman could theoretically have a prostate due to any number of rare conditions and thus experience cancer in that tissue, none of these cases showed up in the IMR data. 
try: female_prostate = female_reviews_only["Diagnosis Sub Category"].value_counts()[ "Prostate Cancer" ] except KeyError: # if "Prostate Cancer" not found female_prostate = 0 male_prostate = male_reviews_only["Diagnosis Sub Category"].value_counts()[ "Prostate Cancer" ] print(f"Female IMRs for Prostate Cancer treatments: {female_prostate}") print(f"Male IMRs for Prostate Cancer treatments: {male_prostate}") # ## Short Stature Treatment Appeals are Highest During Puberty Age # * Only 3 of the 208 patients were adults (18-20 years old). # * Most IMRs for short stature treatment are filed for 10-17 year olds, [which are the years of puberty and thus considerable height growth, especially in males](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4266869/). # It is not clear whether health insurance companies are less likely to deny treatments for short stature for children younger than 10, thus reducing likelihood of IMRs being filed, or that short stature becomes most obvious around puberty, prompting more doctor visits and subsequent health insurance denial overall due to sheer volume of patients. # An IMR for adults seeking treatment for short stature is rare, since the typical treatments that work by delaying puberty or increasing human growth hormone (HGH) no longer work in adulthood. short_stature = imr_df[ imr_df["Findings"].str.contains("short stature", case=False, na=False) ] short_stature.head(5) short_stature["Age Range"].value_counts() short_stature_ages = short_stature["Extracted Age"].value_counts().sort_index() print(short_stature_ages) short_stature_ages.plot(kind="bar") # ## 3 Times More IMRs for Short Stature Treatment are Made for Boys than Girls # * 3 times as many appeals were made for males with short stature versus females (154 vs. 55 IMRs). # * IMRs for both males and females are concentrated during pubescent years, though female IMR requests for short stature treatments drop off more quickly around the 15-17 year-old range. short_stature["Extracted Gender"].value_counts() short_stature_female = short_stature[short_stature["Extracted Gender"] == "Female"] short_stature_male = short_stature[short_stature["Extracted Gender"] == "Male"] short_female_counts = short_stature_female["Extracted Age"].value_counts().sort_index() short_male_counts = short_stature_male["Extracted Age"].value_counts().sort_index() short_female_counts.plot(kind="bar", title="IMRs for Female Short Stature", color="red") short_male_counts.plot(kind="bar", title="IMRs for Male Short Stature", color="#304fa6") short_stature_by_gender = pd.concat( [short_female_counts, short_male_counts], axis=1 ).fillna(0) short_stature_by_gender.columns = ["Female", "Male"] short_stature_by_gender.plot( kind="bar", color=["#db4430", "#304fa6"], title="IMRs for Short Stature by Gender" ) short_stature_by_gender.plot( kind="bar", stacked=True, color=["#db4430", "#304fa6"], title="IMRs for Short Stature by Gender", )
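Several of the percentage figures in the analysis above come from the same value_counts-and-divide pattern. The sketch below shows a compact version of that pattern (normalized `value_counts` plus a gender-by-category crosstab) on a toy DataFrame; the rows are invented and are not IMR data.

```python
import pandas as pd

# Toy stand-in for the IMR table (invented rows).
df = pd.DataFrame({
    "Extracted Gender": ["Female", "Male", "Female", "Female", "Male", "Female"],
    "Diagnosis Category": ["Orthopedic", "Mental", "Orthopedic", "Cancer", "Orthopedic", "Mental"],
})

# Share of IMRs per gender, as percentages (the 58%/42% style figures above).
print(df["Extracted Gender"].value_counts(normalize=True, dropna=False) * 100)

# Category counts broken down by gender (the "top categories by gender" tables).
print(pd.crosstab(df["Diagnosis Category"], df["Extracted Gender"]))
```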
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/084/129084132.ipynb
ca-independent-medical-review
prasad22
[{"Id": 129084132, "ScriptId": 37642083, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14517655, "CreationDate": "05/10/2023 21:55:41", "VersionNumber": 2.0, "Title": "California IMR - Hepatitis, Gender, Short Stature", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 317.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 315.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184824106, "KernelVersionId": 129084132, "SourceDatasetVersionId": 5323631}]
[{"Id": 5323631, "DatasetId": 3092876, "DatasourceVersionId": 5396911, "CreatorUserId": 1610208, "LicenseName": "CC0: Public Domain", "CreationDate": "04/05/2023 17:30:37", "VersionNumber": 1.0, "Title": "California Independent Medical Review Dataset", "Slug": "ca-independent-medical-review", "Subtitle": "Independent Medical Review (IMR) Determinations, Trend", "Description": "This is a good starter dataset for NLP enthusiasts.\n\n## Content\n\nThis data is from the California Department of Managed Health Care (DMHC). It contains all decisions from Independent Medical Reviews (IMR) administered by the DMHC since January 1, 2001. An IMR is an independent review of a denied, delayed, or modified health care service that the health plan has determined to be not medically necessary, experimental/investigational or non-emergent/urgent. If the IMR is decided in an enrollee's favor, the health plan must authorize the service or treatment requested.\n\nImage Credit - [Moondance](https://pixabay.com/users/elf-moondance-19728901/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5947297) from [Pixabay](https://pixabay.com/illustrations/doctor-nurse-patient-coronavirus-5947297/)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3092876, "CreatorUserId": 1610208, "OwnerUserId": 1610208.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5323631.0, "CurrentDatasourceVersionId": 5396911.0, "ForumId": 3155992, "Type": 2, "CreationDate": "04/05/2023 17:30:37", "LastActivityDate": "04/05/2023", "TotalViews": 6072, "TotalDownloads": 658, "TotalVotes": 29, "TotalKernels": 10}]
[{"Id": 1610208, "UserName": "prasad22", "DisplayName": "Prasad Patil", "RegisterDate": "02/05/2018", "PerformanceTier": 2}]
false
1
5,959
0
6,228
5,959
129084675
import tensorflow as tf import tensorflow_decision_forests as tfdf import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # Comment this out if the data visualisations don't work on your side print("TensorFlow v" + tf.__version__) print("TensorFlow Decision Forests v" + tfdf.__version__) train_file_path = "../input/house-prices-advanced-regression-techniques/train.csv" dataset_df = pd.read_csv(train_file_path) print("Full train dataset shape is {}".format(dataset_df.shape)) dataset_df.head(3) dataset_df = dataset_df.drop("Id", axis=1) dataset_df.head(3) dataset_df.info() print(dataset_df["SalePrice"].describe()) plt.figure(figsize=(9, 8)) sns.distplot(dataset_df["SalePrice"], color="g", bins=100, hist_kws={"alpha": 0.4}) list(set(dataset_df.dtypes.tolist())) df_num = dataset_df.select_dtypes(include=["float64", "int64"]) df_num.head()
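# The notebook above stops after the exploratory plots. As a hedged sketch of how a regression model might be fit on this data with TensorFlow Decision Forests: this continuation is illustrative only, assumes dataset_df and the tfdf import from above, and leaves hyperparameters at library defaults.
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(
    dataset_df, label="SalePrice", task=tfdf.keras.Task.REGRESSION
)

# Default Random Forest regressor, fit directly on the converted dataset.
model = tfdf.keras.RandomForestModel(task=tfdf.keras.Task.REGRESSION)
model.fit(train_ds)

# Out-of-bag evaluation collected during training.
print(model.make_inspector().evaluation())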
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/084/129084675.ipynb
null
null
[{"Id": 129084675, "ScriptId": 38373469, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13832174, "CreationDate": "05/10/2023 22:06:25", "VersionNumber": 3.0, "Title": "House Prediction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 31.0, "LinesInsertedFromPrevious": 30.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 1.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
281
0
281
281
129084638
# Import the required modules import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import plotly.express as px from plotly.subplots import make_subplots from plotly import tools import plotly.graph_objs as go # Data file: "Analytic_test". [Link.](https://docs.google.com/spreadsheets/d/1YoYnKTWtTtOpAdUYDmaDvIJghn8CFBFI/edit?usp=share_link&ouid=115750805206343556774&rtpof=true&sd=true) # It contains data on 4 companies in a structured format: Red, Black, Green and Blue. # The following data is provided for each company: # * *Hid* - human_id, an encrypted personal identifier of a company user # * *has_company_id* - whether the user has a registered account with the company # * *company_gmv_total* - the user's total gmv in the service # * *company_gvm_order_count* - the number of the user's successful orders in the service # * *company_activity_count* - the number of the user's active actions in the service # * *company_android* - the company's app installed by the user on the android platform # * *company_apple* - the company's app installed by the user on the Apple platform # * *company_web* - the company's app installed by the user on a PC # * *is_msk* - whether the user is located in Moscow # Load the dataset companies = pd.read_excel("Analytic_test.xlsx", header=1, index_col=0) companies.head() # # Data Preprocessing print("Shape of companies dataset:", companies.shape) companies["HID"].nunique() # All users are unique companies.info() # The active_Red feature should most likely be called has_Red_id. # Let's check this companies["active_Red"].unique() # Since this feature contains only 0 and 1, we can conclude that it indicates whether the user has a registered account with company Red # Rename this feature companies = companies.rename(columns={"active_Red": "has_Red_id"}) # In the company_gvm_total features, replace gvm with gmv companies = companies.rename( columns={"Green_gvm_total": "Green_gmv_total", "Black_gvm_total": "Black_gmv_total"} ) # There is one NaN value in Blue_gmv_total. # Let's look at it companies[companies["Blue_gmv_total"].isna()] # Replace the missing value with 0, since this feature represents gmv companies = companies.fillna(0) companies.info() # Convert the categorical features to the categorical dtype companies["has_Red_id"].unique() companies["has_Green_id"].unique() companies["has_Green_id"].value_counts() companies["has_Black_id"].unique() companies["has_Black_id"].value_counts() companies["has_Blue_id"].unique() companies["has_Blue_id"].value_counts() companies["Red_android"].unique() companies["Green_android"].unique() companies["Blue_android"].unique() companies["Black_android"].unique() companies["Red_apple"].unique() companies["Green_apple"].unique() companies["Blue_apple"].unique() companies["Black_apple"].unique() companies["is_msk"].unique() companies["Red_web"].unique() companies["Green_web"].unique() companies["Blue_web"].unique() companies["Black_web"].unique() # Since the has_company_id features indicate whether the user has a registered account, they should take only two values: 0 - no account and 1 - has an account. Therefore, if a value greater than 1 is found, set it to 1.
for col in companies.columns: if "has" in col: companies.loc[companies[col] > 1.0, col] = 1.0 # Build a list of categorical features category_features_names = [ col for col in companies.columns if ("has" in col) or ("android" in col) or ("apple" in col) or ("msk" in col) or ("web" in col) ] companies[category_features_names] = companies[category_features_names].astype( "category" ) companies.info() # Examine the features with dtype object companies["Blue_activity_count"].unique() companies["Green_gvm_order_count"].unique() companies["Black_gvm_order_count"].unique() # These features contain dates among their values # Drop the rows that contain dates and convert the count columns to a numeric type objs_features = [ "Black_gvm_order_count", "Green_gvm_order_count", "Blue_activity_count", ] for feature in objs_features: companies[feature] = pd.to_numeric(companies[feature], errors="coerce") companies = companies.dropna() companies.info() # # Data Visualization companies.head() companies.info() # Split the company data into separate tables red_company = pd.DataFrame( companies[["HID"] + [col for col in companies.columns if "Red" in col] + ["is_msk"]] ) red_company.head() blue_company = pd.DataFrame( companies[ ["HID"] + [col for col in companies.columns if "Blue" in col] + ["is_msk"] ] ) blue_company.head() green_company = pd.DataFrame( companies[ ["HID"] + [col for col in companies.columns if "Green" in col] + ["is_msk"] ] ) green_company.head() black_company = pd.DataFrame( companies[ ["HID"] + [col for col in companies.columns if "Black" in col] + ["is_msk"] ] ) black_company.head() # Plot of the number of registered users for each company name_companies = ["Red", "Green", "Blue", "Black"] number_users = [ companies["has_Red_id"].astype("int64").sum(), companies["has_Green_id"].astype("int64").sum(), companies["has_Blue_id"].astype("int64").sum(), companies["has_Black_id"].astype("int64").sum(), ] fig = px.bar( x=name_companies, y=number_users, title="Количество зарегистрированных пользователей в компаниях", labels={"y": "Количество пользователей", "x": "Компания"}, ) fig.update_layout(title_x=0.5) fig.show() # Company Red has by far the most registered users (> 55000), followed by company Blue (~13000); companies Green and Black each have roughly 1000 registered users # For each company, look at the distribution of the number of installs by platform name_platform = ["Apple", "Android", "Web"] fig = make_subplots( rows=2, cols=2, subplot_titles=("Red Company", "Green Company", "Blue Company", "Black Company"), ) platform_users_red = [ red_company["Red_apple"].astype("int64").sum(), red_company["Red_android"].astype("int64").sum(), red_company["Red_web"].astype("int64").sum(), ] platform_users_green = [ green_company["Green_apple"].astype("int64").sum(), green_company["Green_android"].astype("int64").sum(), green_company["Green_web"].astype("int64").sum(), ] platform_users_blue = [ blue_company["Blue_apple"].astype("int64").sum(), blue_company["Blue_android"].astype("int64").sum(), blue_company["Blue_web"].astype("int64").sum(), ] platform_users_black = [ black_company["Black_apple"].astype("int64").sum(), black_company["Black_android"].astype("int64").sum(), black_company["Black_web"].astype("int64").sum(), ] fig.add_trace(go.Bar(x=name_platform, y=platform_users_red), row=1, col=1) fig.add_trace(go.Bar(x=name_platform, y=platform_users_green), row=1, col=2) fig.add_trace(go.Bar(x=name_platform, y=platform_users_blue), row=2, col=1)
fig.add_trace(go.Bar(x=name_platform, y=platform_users_black), row=2, col=2) fig.update_layout( title_text="Количество установок в зависимости от платформы", title_x=0.5, showlegend=False, yaxis_title="", ) fig["layout"]["yaxis"]["title"] = "Количество установок" fig["layout"]["yaxis3"]["title"] = "Количество установок" fig.show() # The most popular companies have very similar install distributions: usage via PC is the highest, followed by Android and Apple. # Company Green is most likely focused on mobile usage (a mobile app), since its number of installs on mobile platforms greatly exceeds PC. # Company Black is more popular on Apple. # Check the difference in GMV between users from Moscow and users from other cities moscow_users = ( companies.groupby("is_msk") .agg( mean_blue_gvm_total=("Blue_gmv_total", "mean"), mean_green_gvm_total=("Green_gmv_total", "mean"), mean_black_gvm_total=("Black_gmv_total", "mean"), ) .reset_index() ) moscow_users feature_names = ["Компания Blue", "Компания Green", "Компания Black"] fig = go.Figure( data=[ go.Bar(name="Not Moscow", x=feature_names, y=moscow_users.iloc[0,][1:]), go.Bar(name="Moscow", x=feature_names, y=moscow_users.iloc[1,][1:]), ] ) fig.update_layout( title_text="Среднее значение GMV пользователей из Москвы и пользователей из других городов", barmode="group", title_x=0.5, yaxis_title="Среднее значение GMV", ) fig.show() # The chart shows that users from Moscow make purchases for larger amounts (GMV) than users from other cities # Look at the mean user GMV for each company avg_blue_gvm = blue_company[blue_company["has_Blue_id"] == 1]["Blue_gmv_total"].mean() avg_green_gvm = green_company[green_company["has_Green_id"] == 1][ "Green_gmv_total" ].mean() avg_black_gvm = black_company[black_company["has_Black_id"] == 1][ "Black_gmv_total" ].mean() feature_names = ["Компания Blue", "Компания Green", "Компания Black"] fig = px.bar( x=feature_names, y=[avg_blue_gvm, avg_green_gvm, avg_black_gvm], title="Среднее значение gmv пользователей", labels={"y": "Среднее значение gvm", "x": "Компания"}, ) fig.update_layout(title_x=0.5) fig.show() # Company Blue's GMV is much lower than the other companies', which may indicate that it mostly sells inexpensive goods, or that companies Green and Black sell more niche and expensive goods. # Based on the mean GMV and the number of registered users, we can conclude that companies Green and Black sell more niche and expensive goods than company Blue, which is why Green and Black have far fewer users but a much higher mean GMV. # Compare the mean number of successful orders in companies Green and Black avg_green_order_count = green_company[green_company["has_Green_id"] == 1][ "Green_gvm_order_count" ].mean() avg_black_order_count = black_company[black_company["has_Black_id"] == 1][ "Black_gvm_order_count" ].mean() feature_names = ["Компания Green", "Компания Black"] fig = px.bar( x=feature_names, y=[avg_green_order_count, avg_black_order_count], title="Среднее значение успешных заказов пользователей", labels={"y": "Среднее значение успешных заказов", "x": "Компания"}, ) fig.update_layout(title_x=0.5) fig.show()
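# A minimal self-contained sketch of the cleaning step used above, where count columns polluted with date strings are coerced to numeric and the affected rows dropped. The toy column values are invented for illustration and are not from the Analytic_test file.
import pandas as pd

toy = pd.DataFrame({"Blue_activity_count": ["3", "12", "2021-05-10", "7"]})

# errors="coerce" turns non-numeric entries into NaN, which dropna() then removes,
# mirroring the pd.to_numeric(...)/dropna() step in the notebook.
toy["Blue_activity_count"] = pd.to_numeric(toy["Blue_activity_count"], errors="coerce")
toy = toy.dropna()
print(toy)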
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/084/129084638.ipynb
null
null
[{"Id": 129084638, "ScriptId": 38373443, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9598962, "CreationDate": "05/10/2023 22:05:49", "VersionNumber": 1.0, "Title": "Analytic_companies", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 294.0, "LinesInsertedFromPrevious": 294.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,849
0
3,849
3,849
129084362
<jupyter_start><jupyter_text>UrbanSound8K This dataset contains 8732 labeled sound excerpts (&lt;=4s) of urban sounds from 10 classes: `air_conditioner`, `car_horn`, `children_playing`, `dog_bark`, `drilling`, `enginge_idling`, `gun_shot`, `jackhammer`, `siren`, and `street_music`. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to our paper. All excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above. In addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided. ## AUDIO FILES INCLUDED 8732 audio files of urban sounds (see description above) in WAV format. The sampling rate, bit depth, and number of channels are the same as those of the original file uploaded to Freesound (and hence may vary from file to file). ##META-DATA FILES INCLUDED ``` UrbanSound8k.csv ``` This file contains meta-data information about every audio file in the dataset. This includes: * slice_file_name: The name of the audio file. The name takes the following format: [fsID]-[classID]-[occurrenceID]-[sliceID].wav, where: [fsID] = the Freesound ID of the recording from which this excerpt (slice) is taken [classID] = a numeric identifier of the sound class (see description of classID below for further details) [occurrenceID] = a numeric identifier to distinguish different occurrences of the sound within the original recording [sliceID] = a numeric identifier to distinguish different slices taken from the same occurrence * fsID: The Freesound ID of the recording from which this excerpt (slice) is taken * start The start time of the slice in the original Freesound recording * end: The end time of slice in the original Freesound recording * salience: A (subjective) salience rating of the sound. 1 = foreground, 2 = background. * fold: The fold number (1-10) to which this file has been allocated. * classID: A numeric identifier of the sound class: 0 = air_conditioner 1 = car_horn 2 = children_playing 3 = dog_bark 4 = drilling 5 = engine_idling 6 = gun_shot 7 = jackhammer 8 = siren 9 = street_music * class: The class name: air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer, siren, street_music. ##BEFORE YOU DOWNLOAD: AVOID COMMON PITFALLS! Since releasing the dataset we have noticed a couple of common mistakes that could invalidate your results, potentially leading to manuscripts being rejected or the publication of incorrect results. To avoid this, please read the following carefully: 1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation The experiments conducted by vast majority of publications using UrbanSound8K (by ourselves and others) evaluate classification models via 10-fold cross validation using the predefined splits*. We strongly recommend following this procedure. Why? If you reshuffle the data (e.g. combine the data from all folds and generate a random train/test split) you will be incorrectly placing related samples in both the train and test sets, leading to inflated scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong. 
Your results will NOT be comparable to previous results in the literature, meaning any claims to an improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research. 2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores We have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9, test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform 10-fold cross validation using the provided folds and report the average score. Why? Not all the splits are as "easy". That is, models tend to obtain much higher scores when trained on folds 1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason, it is important to evaluate your model on each of the 10 splits and report the average accuracy. Again, your results will NOT be comparable to previous results in the literature. ## Acknowledgements We kindly request that articles and other works in which this dataset is used cite the following paper: J. Salamon, C. Jacoby and J. P. Bello, "A Dataset and Taxonomy for Urban Sound Research", 22nd ACM International Conference on Multimedia, Orlando USA, Nov. 2014. More information at https://urbansounddataset.weebly.com/urbansound8k.html Kaggle dataset identifier: urbansound8k <jupyter_script># # Introduction # This dataset contains 8732 labeled sound excerpts (<=4s) of urban sounds from 10 classes: air_conditioner, car_horn, children_playing, dog_bark, drilling, enginge_idling, gun_shot, jackhammer, siren, and street_music. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to their paper. All excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above. # In addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) SAMPLING_FREQ = 22050 MFCC_COEF_RETAIN = 25 MFCC_COEF = 40 MFCC_WINDOW_DURATION = 0.0232 # window length in seconds (~23.2 ms) # Load data sounds_df = pd.read_csv("/kaggle/input/urbansound8k/UrbanSound8K.csv") sounds_df.head() sounds_freq = sounds_df["class"].value_counts().sort_values() print(sounds_freq) sounds_freq.plot( kind="pie", figsize=(5, 5), title="Sounds", autopct="%1.1f%%", shadow=False, fontsize=8, ) folds_freq = sounds_df["fold"].value_counts().sort_index() print(folds_freq) folds_freq.plot( kind="pie", figsize=(5, 5), title="Folds", autopct="%1.1f%%", shadow=False, fontsize=8, ) import matplotlib.pyplot as plt plt.figure(figsize=[25, 10]) for i in range(1, 11): fold_df = sounds_df[sounds_df["fold"] == i] fold_freq = fold_df["class"].value_counts() plt.subplot(2, 5, i) fold_freq.plot( kind="pie", title=f"fold {i}", autopct="%1.1f%%", shadow=False, fontsize=8 ) import librosa from scipy.stats import skew from scipy.stats import kurtosis def get_mfcc(filename, fold): wave, sr = librosa.load( f"../input/urbansound8k/fold{fold}/{filename}", mono=True, sr=SAMPLING_FREQ ) wave = librosa.util.normalize(wave) mfccs = librosa.feature.mfcc( y=wave, sr=sr, n_mfcc=MFCC_COEF, hop_length=int(MFCC_WINDOW_DURATION * sr / 2.0), n_fft=int(MFCC_WINDOW_DURATION * sr), ) mfccs = (mfccs - np.mean(mfccs)) / np.std(mfccs) # keep the first 25 mfccs = mfccs[:MFCC_COEF_RETAIN, :] mfccs_min = mfccs.min(axis=1) mfccs_max = mfccs.max(axis=1) mfccs_median = np.median(mfccs, axis=1) mfccs_mean = np.mean(mfccs, axis=1) mfccs_var = np.var(mfccs, axis=1) mfccs_skewness = skew(mfccs, axis=1) mfccs_kurtosis = kurtosis(mfccs, axis=1) mfccs_first_derivative = np.diff(mfccs, n=1, axis=1) mfccs_first_derivative_mean = np.mean(mfccs_first_derivative, axis=1) mfccs_first_derivative_var = np.var(mfccs_first_derivative, axis=1) mfccs_second_derivative = np.diff(mfccs, n=2, axis=1) mfccs_second_derivative_mean = np.mean(mfccs_second_derivative, axis=1) mfccs_second_derivative_var = np.var(mfccs_second_derivative, axis=1) mfccs_stats = np.vstack( ( mfccs_min, mfccs_max, mfccs_median, mfccs_mean, mfccs_var, mfccs_skewness, mfccs_kurtosis, mfccs_first_derivative_mean, mfccs_first_derivative_var, mfccs_second_derivative_mean, mfccs_second_derivative_var, ) ) return pd.Series([mfccs, mfccs_stats.transpose()]) sounds_df["duration"] = sounds_df["end"] - sounds_df["start"] sounds_df.plot.hist(bins=10, column=["duration"], by="class", figsize=(5, 20)) plt.tight_layout() from tqdm import tqdm tqdm.pandas() sounds_df[["mfccs", "mfccs_stats"]] = sounds_df[ ["slice_file_name", "fold"] ].progress_apply(lambda x: get_mfcc(*x), axis=1) plt.figure(figsize=[15, 10]) for i in range(0, 9): ax = plt.subplot(3, 3, i + 1) img = librosa.display.specshow( sounds_df["mfccs"][i], x_axis="time", hop_length=int(0.0232 * SAMPLING_FREQ / 2.0), ) ax.set(title=sounds_df["class"][i]) plt.colorbar() plt.tight_layout() from ipywidgets import Output, GridspecLayout, Layout import IPython import IPython.display as ipd grid = GridspecLayout( 2, 5, align_items="top", layout=Layout(width="auto", height="auto") ) for i in range(0, 5): out = Output() with out: fig, ax = plt.subplots() plt.imshow(sounds_df["mfccs_stats"][i], origin="lower") ax.set(title=sounds_df["class"][i]) plt.close(fig) ipd.display(ax.figure) grid[0, i] = out out = Output() with out: ipd.display( ipd.Audio( f"../input/urbansound8k/fold{sounds_df['fold'][i]}/{sounds_df['slice_file_name'][i]}" ) ) grid[1, i] = out ipd.display(grid) plt.tight_layout() sounds_df.head() max_length =
sounds_df["mfccs_stats"][0].shape print(max_length) max_length = sounds_df["mfccs"][1].shape print(max_length)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/084/129084362.ipynb
urbansound8k
chrisfilo
[{"Id": 129084362, "ScriptId": 38308333, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2493382, "CreationDate": "05/10/2023 22:00:18", "VersionNumber": 3.0, "Title": "UrbanSound8K Classification", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 46.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 81.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184824427, "KernelVersionId": 129084362, "SourceDatasetVersionId": 928025}]
[{"Id": 928025, "DatasetId": 500970, "DatasourceVersionId": 955383, "CreatorUserId": 2102373, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "02/04/2020 18:37:24", "VersionNumber": 1.0, "Title": "UrbanSound8K", "Slug": "urbansound8k", "Subtitle": "8732 labeled sound excerpts", "Description": "This dataset contains 8732 labeled sound excerpts (&lt;=4s) of urban sounds from 10 classes: `air_conditioner`, `car_horn`, `children_playing`, `dog_bark`, `drilling`, `enginge_idling`, `gun_shot`, `jackhammer`, `siren`, and `street_music`. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to our paper.\nAll excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above.\n\nIn addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided.\n\n## AUDIO FILES INCLUDED\n\n8732 audio files of urban sounds (see description above) in WAV format. The sampling rate, bit depth, and number of channels are the same as those of the original file uploaded to Freesound (and hence may vary from file to file).\n\n##META-DATA FILES INCLUDED\n```\nUrbanSound8k.csv\n\n```\nThis file contains meta-data information about every audio file in the dataset. This includes:\n\n* slice_file_name: \nThe name of the audio file. The name takes the following format: [fsID]-[classID]-[occurrenceID]-[sliceID].wav, where:\n[fsID] = the Freesound ID of the recording from which this excerpt (slice) is taken\n[classID] = a numeric identifier of the sound class (see description of classID below for further details)\n[occurrenceID] = a numeric identifier to distinguish different occurrences of the sound within the original recording\n[sliceID] = a numeric identifier to distinguish different slices taken from the same occurrence\n\n* fsID:\nThe Freesound ID of the recording from which this excerpt (slice) is taken\n\n* start\nThe start time of the slice in the original Freesound recording\n\n* end:\nThe end time of slice in the original Freesound recording\n\n* salience:\nA (subjective) salience rating of the sound. 1 = foreground, 2 = background.\n\n* fold:\nThe fold number (1-10) to which this file has been allocated.\n\n* classID:\nA numeric identifier of the sound class:\n0 = air_conditioner\n1 = car_horn\n2 = children_playing\n3 = dog_bark\n4 = drilling\n5 = engine_idling\n6 = gun_shot\n7 = jackhammer\n8 = siren\n9 = street_music\n\n* class:\nThe class name: air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer, \nsiren, street_music.\n\n##BEFORE YOU DOWNLOAD: AVOID COMMON PITFALLS!\n\nSince releasing the dataset we have noticed a couple of common mistakes that could invalidate your results, potentially leading to manuscripts being rejected or the publication of incorrect results. To avoid this, please read the following carefully:\n\n1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation\nThe experiments conducted by vast majority of publications using UrbanSound8K (by ourselves and others) evaluate classification models via 10-fold cross validation using the predefined splits*. We strongly recommend following this procedure.\n\nWhy?\nIf you reshuffle the data (e.g. 
combine the data from all folds and generate a random train/test split) you will be incorrectly placing related samples in both the train and test sets, leading to inflated scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong.\nYour results will NOT be comparable to previous results in the literature, meaning any claims to an improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research.\n\n2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores\nWe have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9, test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform 10-fold cross validation using the provided folds and report the average score.\n\nWhy?\nNot all the splits are as \"easy\". That is, models tend to obtain much higher scores when trained on folds 1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason, it is important to evaluate your model on each of the 10 splits and report the average accuracy.\nAgain, your results will NOT be comparable to previous results in the literature.\n\n\n## Acknowledgements\n\nWe kindly request that articles and other works in which this dataset is used cite the following paper:\n\nJ. Salamon, C. Jacoby and J. P. Bello, \"A Dataset and Taxonomy for Urban Sound Research\", 22nd ACM International Conference on Multimedia, Orlando USA, Nov. 2014.\n\nMore information at https://urbansounddataset.weebly.com/urbansound8k.html", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
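The fold protocol described above is easy to get wrong, so here is a minimal sketch of the evaluation loop it asks for: hold out each of the ten predefined folds in turn, train on the other nine, and average the scores. The CSV path matches the Kaggle copy of the dataset used later in this file; `make_model`, `features`, `SomeClassifier` and `X` are placeholders, so treat this as an illustration of the protocol rather than a reference implementation.

```python
# Sketch of the evaluation protocol requested in the dataset description:
# never reshuffle across folds, always hold out one predefined fold at a time.
import numpy as np
import pandas as pd

meta = pd.read_csv("/kaggle/input/urbansound8k/UrbanSound8K.csv")
folds = meta["fold"].to_numpy()
labels = meta["classID"].to_numpy()

def fold_wise_scores(make_model, features):
    """features: array of shape (n_clips, n_features), row-aligned with `meta`."""
    scores = []
    for held_out in range(1, 11):
        train, test = folds != held_out, folds == held_out
        model = make_model()                      # a fresh model for every split
        model.fit(features[train], labels[train])
        scores.append(model.score(features[test], labels[test]))
    return scores

# usage (hypothetical): scores = fold_wise_scores(lambda: SomeClassifier(), X)
#                       print(np.mean(scores))
```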
[{"Id": 500970, "CreatorUserId": 2102373, "OwnerUserId": 2102373.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 928025.0, "CurrentDatasourceVersionId": 955383.0, "ForumId": 514107, "Type": 2, "CreationDate": "02/04/2020 18:37:24", "LastActivityDate": "02/04/2020", "TotalViews": 70225, "TotalDownloads": 14601, "TotalVotes": 155, "TotalKernels": 137}]
[{"Id": 2102373, "UserName": "chrisfilo", "DisplayName": "Chris Gorgolewski", "RegisterDate": "07/26/2018", "PerformanceTier": 2}]
# # Introduction # This dataset contains 8732 labeled sound excerpts (<=4s) of urban sounds from 10 classes: air_conditioner, car_horn, children_playing, dog_bark, drilling, enginge_idling, gun_shot, jackhammer, siren, and street_music. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to their paper. All excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above. # In addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) SAMPLING_FREQ = 22050 MFCC_COEF_RETAIN = 25 MFCC_COEF = 40 MFCC_WINDOW_DURATION = 0.0232 # in miliseconds # Load data sounds_df = pd.read_csv("/kaggle/input/urbansound8k/UrbanSound8K.csv") sounds_df.head() sounds_freq = sounds_df["class"].value_counts().sort_values() print(sounds_freq) sounds_freq.plot( kind="pie", figsize=(5, 5), title="Sounds", autopct="%1.1f%%", shadow=False, fontsize=8, ) folds_freq = sounds_df["fold"].value_counts().sort_index() print(folds_freq) folds_freq.plot( kind="pie", figsize=(5, 5), title="Folds", autopct="%1.1f%%", shadow=False, fontsize=8, ) import matplotlib.pyplot as plt plt.figure(figsize=[25, 10]) for i in range(1, 11): fold_df = sounds_df[sounds_df["fold"] == i] fold_freq = fold_df["class"].value_counts() plt.subplot(2, 5, i) fold_freq.plot( kind="pie", title=f"fold {i}", autopct="%1.1f%%", shadow=False, fontsize=8 ) import librosa from scipy.stats import skew from scipy.stats import kurtosis def get_mfcc(filename, fold): wave, sr = librosa.load( f"../input/urbansound8k/fold{fold}/{filename}", mono=True, sr=SAMPLING_FREQ ) wave = librosa.util.normalize(wave) mfccs = librosa.feature.mfcc( y=wave, sr=sr, n_mfcc=MFCC_COEF, hop_length=int(MFCC_WINDOW_DURATION * sr / 2.0), n_fft=int(MFCC_WINDOW_DURATION * sr), ) mfccs = (mfccs - np.mean(mfccs)) / np.std(mfccs) # keep the first 25 mfccs = mfccs[:MFCC_COEF_RETAIN, :] mfccs_min = mfccs.min(axis=1) mfccs_max = mfccs.max(axis=1) mfccs_median = np.median(mfccs, axis=1) mfccs_mean = np.mean(mfccs, axis=1) mfccs_var = np.var(mfccs, axis=1) mfccs_skewness = skew(mfccs, axis=1) mfccs_kurtosis = kurtosis(mfccs, axis=1) mfccs_first_derivative = np.diff(mfccs, n=1, axis=1) mfccs_first_derivative_mean = np.mean(mfccs_first_derivative, axis=1) mfccs_first_derivative_var = np.var(mfccs_first_derivative, axis=1) mfccs_second_derivative = np.diff(mfccs, n=2, axis=1) mfccs_second_derivative_mean = np.mean(mfccs_second_derivative, axis=1) mfccs_second_derivative_var = np.var(mfccs_second_derivative, axis=1) mfccs_stats = np.vstack( ( mfccs_min, mfccs_max, mfccs_median, mfccs_mean, mfccs_var, mfccs_skewness, mfccs_kurtosis, mfccs_first_derivative_mean, mfccs_first_derivative_var, mfccs_second_derivative_mean, mfccs_second_derivative_var, ) ) return pd.Series([mfccs, mfccs_stats.transpose()]) sounds_df["duration"] = sounds_df["end"] - sounds_df["start"] sounds_df.plot.hist(bins=10, column=["duration"], by="class", figsize=(5, 20)) plt.tight_layout() from tqdm import tqdm tqdm.pandas() sounds_df[["mfccs", "mfccs_stats"]] = sounds_df[ ["slice_file_name", "fold"] ].progress_apply(lambda x: get_mfcc(*x), axis=1) plt.figure(figsize=[15, 10]) for i in range(0, 9): ax = plt.subplot(3, 3, i + 
1) img = librosa.display.specshow( sounds_df["mfccs"][i], x_axis="time", hop_length=int(0.0232 * SAMPLING_FREQ / 2.0), ) ax.set(title=sounds_df["class"][i]) plt.colorbar() plt.tight_layout() from ipywidgets import Output, GridspecLayout, Layout import IPython import IPython.display as ipd grid = GridspecLayout( 2, 5, align_items="top", layout=Layout(width="auto", height="auto") ) for i in range(0, 5): out = Output() with out: fig, ax = plt.subplots() plt.imshow(sounds_df["mfccs_stats"][i], origin="lower") ax.set(title=sounds_df["class"][i]) plt.close(fig) ipd.display(ax.figure) grid[0, i] = out out = Output() with out: ipd.display( ipd.Audio( f"../input/urbansound8k/fold{sounds_df['fold'][i]}/{sounds_df['slice_file_name'][i]}" ) ) grid[1, i] = out ipd.display(grid) plt.tight_layout() sounds_df.head() max_length = sounds_df["mfccs_stats"][0].shape print(max_length) max_length = sounds_df["mfccs"][1].shape print(max_length)
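The notebook above stops after computing the per-clip MFCC statistics. A natural next step, sketched below under the same fold rules the dataset description insists on, is to flatten each `mfccs_stats` matrix into a fixed-length vector and train a simple classifier fold by fold. The choice of `RandomForestClassifier` is an assumption for illustration, not part of the original notebook.

```python
# Hypothetical continuation: classify the clips from the MFCC statistics computed
# above, evaluating strictly on the predefined folds (no random train/test split).
import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.stack([stats.ravel() for stats in sounds_df["mfccs_stats"]])  # (n_clips, 25*11)
y = sounds_df["classID"].to_numpy()
folds = sounds_df["fold"].to_numpy()

fold_scores = []
for held_out in range(1, 11):
    train, test = folds != held_out, folds == held_out
    clf = RandomForestClassifier(n_estimators=200, random_state=0)
    clf.fit(X[train], y[train])
    fold_scores.append(clf.score(X[test], y[test]))
print(f"mean accuracy over the 10 predefined folds: {np.mean(fold_scores):.3f}")
```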
false
1
1,763
0
3,080
1,763
129084780
import os import math from IPython import display import numpy as np import matplotlib.pyplot as plt from matplotlib import animation train_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/train" ) validation_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/validation" ) test_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/test" ) def read_record(record_id, directory): record_data = {} for x in [ "band_11", "band_14", "band_15", "human_pixel_masks", "human_individual_masks", ]: try: with open(os.path.join(directory, record_id, x + ".npy"), "rb") as f: record_data[x] = np.load(f) except Exception as e: pass return record_data def normalize_range(data, bounds): """Maps data to the range [0, 1].""" return (data - bounds[0]) / (bounds[1] - bounds[0]) def get_false_color(record_data): _T11_BOUNDS = (243, 303) _CLOUD_TOP_TDIFF_BOUNDS = (-4, 5) _TDIFF_BOUNDS = (-4, 2) r = normalize_range(record_data["band_15"] - record_data["band_14"], _TDIFF_BOUNDS) g = normalize_range( record_data["band_14"] - record_data["band_11"], _CLOUD_TOP_TDIFF_BOUNDS ) b = normalize_range(record_data["band_14"], _T11_BOUNDS) false_color = np.clip(np.stack([r, g, b], axis=2), 0, 1) return false_color def draw(i): im.set_array(false_color[..., i]) return [im] N_TIMES_BEFORE = 4 record_id = "1000603527582775543" record_data = read_record(record_id, train_path) false_color = get_false_color(record_data) img = false_color[..., N_TIMES_BEFORE] plt.figure(figsize=(6, 18)) ax = plt.subplot(3, 1, 1) ax.imshow(img) ax.set_title("False color image") ax = plt.subplot(3, 1, 2) ax.imshow(record_data["human_pixel_masks"], interpolation="none") ax.set_title("Ground truth contrail mask") img.shape
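The cell above imports `matplotlib.animation` and defines `draw`, but never actually builds the animation it seems intended for (stepping through the time axis of `false_color`). A possible way to wire those pieces together, offered as a completion sketch rather than the original author's code, is:

```python
# Hypothetical completion of the animation the notebook sets up but never runs:
# `false_color` and `draw` come from the cell above; `im` is created here so that
# draw() can update it frame by frame.
fig, ax = plt.subplots()
im = ax.imshow(false_color[..., 0])
ax.set_title("False color sequence")
anim = animation.FuncAnimation(
    fig, draw, frames=false_color.shape[-1], interval=500, blit=True
)
display.HTML(anim.to_jshtml())  # render the animation inline in the notebook
```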
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/084/129084780.ipynb
null
null
[{"Id": 129084780, "ScriptId": 38373726, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4483507, "CreationDate": "05/10/2023 22:08:47", "VersionNumber": 1.0, "Title": "contrail_trb_workfile", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 74.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import os import math from IPython import display import numpy as np import matplotlib.pyplot as plt from matplotlib import animation train_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/train" ) validation_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/validation" ) test_path: str = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming/test" ) def read_record(record_id, directory): record_data = {} for x in [ "band_11", "band_14", "band_15", "human_pixel_masks", "human_individual_masks", ]: try: with open(os.path.join(directory, record_id, x + ".npy"), "rb") as f: record_data[x] = np.load(f) except Exception as e: pass return record_data def normalize_range(data, bounds): """Maps data to the range [0, 1].""" return (data - bounds[0]) / (bounds[1] - bounds[0]) def get_false_color(record_data): _T11_BOUNDS = (243, 303) _CLOUD_TOP_TDIFF_BOUNDS = (-4, 5) _TDIFF_BOUNDS = (-4, 2) r = normalize_range(record_data["band_15"] - record_data["band_14"], _TDIFF_BOUNDS) g = normalize_range( record_data["band_14"] - record_data["band_11"], _CLOUD_TOP_TDIFF_BOUNDS ) b = normalize_range(record_data["band_14"], _T11_BOUNDS) false_color = np.clip(np.stack([r, g, b], axis=2), 0, 1) return false_color def draw(i): im.set_array(false_color[..., i]) return [im] N_TIMES_BEFORE = 4 record_id = "1000603527582775543" record_data = read_record(record_id, train_path) false_color = get_false_color(record_data) img = false_color[..., N_TIMES_BEFORE] plt.figure(figsize=(6, 18)) ax = plt.subplot(3, 1, 1) ax.imshow(img) ax.set_title("False color image") ax = plt.subplot(3, 1, 2) ax.imshow(record_data["human_pixel_masks"], interpolation="none") ax.set_title("Ground truth contrail mask") img.shape
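`read_record` also loads `human_individual_masks`, which the notebook never displays. Assuming its last axis indexes the individual annotators (worth confirming with `indiv.shape` before relying on it), the per-annotator labels can be shown next to the aggregated mask like this:

```python
# Sketch: show each annotator's mask alongside the aggregated ground-truth mask.
# Assumes the last axis of human_individual_masks indexes the annotators.
indiv = record_data["human_individual_masks"]
n_annotators = indiv.shape[-1]
plt.figure(figsize=(3 * (n_annotators + 1), 3))
ax = plt.subplot(1, n_annotators + 1, 1)
ax.imshow(record_data["human_pixel_masks"], interpolation="none")
ax.set_title("aggregated")
for k in range(n_annotators):
    ax = plt.subplot(1, n_annotators + 1, k + 2)
    ax.imshow(indiv[..., k], interpolation="none")
    ax.set_title(f"annotator {k + 1}")
plt.tight_layout()
```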
false
0
697
0
697
697
129110407
<jupyter_start><jupyter_text>Wine Dataset for Clustering This dataset is adapted from the Wine Data Set from https://archive.ics.uci.edu/ml/datasets/wine by removing the information about the types of wine for unsupervised learning. The following descriptions are adapted from the UCI webpage: These data are the results of a chemical analysis of wines grown in the same region in Italy but derived from three different cultivars. The analysis determined the quantities of 13 constituents found in each of the three types of wines. The attributes are: - Alcohol - Malic acid - Ash - Alcalinity of ash - Magnesium - Total phenols - Flavanoids - Nonflavanoid phenols - Proanthocyanins - Color intensity - Hue - OD280/OD315 of diluted wines - Proline Kaggle dataset identifier: wine-dataset-for-clustering <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.cluster import KMeans print("Done") # load the data and check the number of non-null values dataset = pd.read_csv("/kaggle/input/wine-dataset-for-clustering/wine-clustering.csv") dataset.info() dataset.describe() # select the right number of clusters: inertia = [] for i in range(1, 11): # init selects the centroids, n_init is the number of runs (it will be 1 in the k-means++ case) k_means = KMeans(n_clusters=i, init="k-means++", n_init="auto") k_means.fit(dataset) inertia.append(k_means.inertia_) sns.set_style("darkgrid") sns.scatterplot( x=[x for x in range(1, 11)], y=inertia, ) plt.title("Elbow curve") plt.xlabel("Number of clusters") plt.ylabel("Within-cluster sum of distances") # The point on the plot where the decrease in SSE suddenly slows down is called the "elbow point". It indicates the optimal number of clusters: beyond it, adding more clusters no longer reduces the SSE substantially. In my case the sharp drop stops at 4 clusters. CLUSTERS = 4 model = KMeans(n_clusters=CLUSTERS, n_init="auto") model.fit(dataset) clusters = pd.DataFrame(columns=dataset.columns, data=model.cluster_centers_) clusters["Amount"] = np.unique(model.labels_, return_counts=True)[1] clusters y_kmeans = model.predict(dataset) # coordinates of the cluster centroids print(model.cluster_centers_)
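One thing worth flagging: K-Means is run on the raw feature values, and the attributes listed above live on very different scales (Proline values are in the hundreds while Hue sits around 1), so the inertia is dominated by the large-valued columns. A standardized variant, offered as a suggestion rather than part of the original notebook, would look like this:

```python
# Optional variant (not in the notebook above): standardize the features before
# K-Means so that no single large-valued column dominates the distances, then
# recompute the elbow curve on the scaled data.
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

dataset = pd.read_csv("/kaggle/input/wine-dataset-for-clustering/wine-clustering.csv")
scaled = StandardScaler().fit_transform(dataset)

inertia_scaled = []
for k in range(1, 11):
    km = KMeans(n_clusters=k, init="k-means++", n_init="auto", random_state=0)
    km.fit(scaled)
    inertia_scaled.append(km.inertia_)
print(inertia_scaled)
```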
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/110/129110407.ipynb
wine-dataset-for-clustering
harrywang
[{"Id": 129110407, "ScriptId": 38107112, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13676645, "CreationDate": "05/11/2023 04:57:12", "VersionNumber": 1.0, "Title": "statoiv-lab3", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 50.0, "LinesInsertedFromPrevious": 50.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184876398, "KernelVersionId": 129110407, "SourceDatasetVersionId": 1116242}]
[{"Id": 1116242, "DatasetId": 626341, "DatasourceVersionId": 1146567, "CreatorUserId": 344198, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/29/2020 00:50:33", "VersionNumber": 1.0, "Title": "Wine Dataset for Clustering", "Slug": "wine-dataset-for-clustering", "Subtitle": "Cluster wines based on their chemical constituents", "Description": "This dataset is adapted from the Wine Data Set from https://archive.ics.uci.edu/ml/datasets/wine by removing the information about the types of wine for unsupervised learning. \n\nThe following descriptions are adapted from the UCI webpage:\n\nThese data are the results of a chemical analysis of wines grown in the same region in Italy but derived from three different cultivars. The analysis determined the quantities of 13 constituents found in each of the three types of wines.\n\nThe attributes are:\n\n- Alcohol\n- Malic acid\n- Ash\n- Alcalinity of ash\n- Magnesium\n- Total phenols\n- Flavanoids\n- Nonflavanoid phenols\n- Proanthocyanins\n- Color intensity\n- Hue\n- OD280/OD315 of diluted wines\n- Proline", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 626341, "CreatorUserId": 344198, "OwnerUserId": 344198.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1116242.0, "CurrentDatasourceVersionId": 1146567.0, "ForumId": 640539, "Type": 2, "CreationDate": "04/29/2020 00:50:33", "LastActivityDate": "04/29/2020", "TotalViews": 105109, "TotalDownloads": 14824, "TotalVotes": 120, "TotalKernels": 81}]
[{"Id": 344198, "UserName": "harrywang", "DisplayName": "Harry Wang", "RegisterDate": "04/29/2015", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.cluster import KMeans print("Done") # load the data and check the number of non-null values dataset = pd.read_csv("/kaggle/input/wine-dataset-for-clustering/wine-clustering.csv") dataset.info() dataset.describe() # select the right number of clusters: inertia = [] for i in range(1, 11): # init selects the centroids, n_init is the number of runs (it will be 1 in the k-means++ case) k_means = KMeans(n_clusters=i, init="k-means++", n_init="auto") k_means.fit(dataset) inertia.append(k_means.inertia_) sns.set_style("darkgrid") sns.scatterplot( x=[x for x in range(1, 11)], y=inertia, ) plt.title("Elbow curve") plt.xlabel("Number of clusters") plt.ylabel("Within-cluster sum of distances") # The point on the plot where the decrease in SSE suddenly slows down is called the "elbow point". It indicates the optimal number of clusters: beyond it, adding more clusters no longer reduces the SSE substantially. In my case the sharp drop stops at 4 clusters. CLUSTERS = 4 model = KMeans(n_clusters=CLUSTERS, n_init="auto") model.fit(dataset) clusters = pd.DataFrame(columns=dataset.columns, data=model.cluster_centers_) clusters["Amount"] = np.unique(model.labels_, return_counts=True)[1] clusters y_kmeans = model.predict(dataset) # coordinates of the cluster centroids print(model.cluster_centers_)
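As a quick sanity check on the elbow-based choice of four clusters, the silhouette score (higher is better) can be computed for each candidate number of clusters. This uses the `dataset` loaded above and is an addition for illustration, not part of the original notebook.

```python
# Cross-check the chosen cluster count with the silhouette score (higher is better).
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

for k in range(2, 11):
    km = KMeans(n_clusters=k, init="k-means++", n_init="auto", random_state=0)
    labels = km.fit_predict(dataset)
    print(k, round(silhouette_score(dataset, labels), 3))
```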
false
1
618
0
845
618
129110749
# ## Dự đoán khả năng sống sót trên tàu Titanic # Vụ đắm tàu RMS Titanic là một trong những vụ đắm tàu khét tiếng nhất trong lịch sử. Vào ngày 15 tháng 4 năm 1912, trong chuyến hành trình đầu tiên của mình, Titanic đã chìm sau khi va chạm với một tảng băng trôi, khiến 1502 trong số 2224 hành khách và thủy thủ đoàn thiệt mạng. Thảm kịch giật gân này đã gây sốc cho cộng đồng quốc tế và dẫn đến các quy định an toàn tốt hơn cho các con tàu. Một trong những nguyên nhân khiến vụ đắm tàu dẫn đến thiệt hại về người như vậy là do không có đủ xuồng cứu sinh cho hành khách và thủy thủ đoàn. Mặc dù có một số yếu tố may mắn liên quan đến việc sống sót sau vụ chìm tàu, nhưng một số nhóm người có nhiều khả năng sống sót hơn những nhóm khác, chẳng hạn như phụ nữ, trẻ em và tầng lớp thượng lưu. Chúng ta cần phân tích xem những loại người nào có khả năng sống sót. # Mô tả bộ dữ liệu sử dụng: # + Hai bộ dữ liệu tương tự bao gồm thông tin về hành khách như tên, tuổi, giới tính, tầng lớp kinh tế xã hội, v.v. Một bộ dữ liệu có tên là train.csv và bộ dữ liệu kia có tên là test.csv. # + Train.csv sẽ chứa thông tin chi tiết về một nhóm nhỏ hành khách trên tàu (chính xác là 891) và quan trọng là sẽ tiết lộ liệu họ có sống sót hay không, còn được gọi là “Survived”. # + Bộ dữ liệu test.csv chứa thông tin tương tự nhưng không tiết lộ “Survived” về mỗi hành khách. # + Sử dụng các mẫu tìm thấy trong dữ liệu train.csv, dự đoán xem 418 hành khách khác trên tàu (tìm thấy trong test.csv) có sống sót hay không. # Mô tả biến: Có 1309 bản ghi của 12 biến. # + Survived: Sống sót (1) hoặc Không (0) # + Pclass: Hạng hành khách # + Name: Tên hành khách # + Sex: Giới tính của hành khách # + Age: Tuổi của hành khách # + SibSp: Số anh chị em/vợ/chồng trên tàu # + Parch: Số phụ huynh/con cái trên tàu # + Ticket: Số vé # + Fare: Giá vé # + Cabin: cabin # + Embarked: Cảng lên tàu # from fastai.imports import * np.set_printoptions(linewidth=130) # ## Tiền xử lý dữ liệu # Import thư viện và dữ liệu. import os iskaggle = os.environ.get("KAGGLE_KERNEL_RUN_TYPE", "") if iskaggle: path = Path("../input/titanic") else: import zipfile, kaggle path = Path("titanic") kaggle.api.competition_download_cli(str(path)) zipfile.ZipFile(f"{path}.zip").extractall(path) df = pd.read_csv(path / "train.csv") tst_df = pd.read_csv(path / "test.csv") # print(df) # print(tst_df) modes = df.mode().iloc[0] # print(modes) # Với các cột không phải số, chuyển đổi các trường đó thành các biến phân loại, bên trong Pandas tạo danh sách tất cả các giá trị duy nhất trong cột và thay thế từng giá trị bằng một số. Số này chỉ là một chỉ mục để tra cứu giá trị trong danh sách tất cả các giá trị duy nhất. # + df['LogFare'] = np.log1p(df['Fare']): Dòng này tính log(x+1) của cột 'Fare' và gán vào cột mới 'LogFare'. Ta làm việc này để làm giảm độ biến thiên giá trị của cột 'Fare' qua đó giúp chuẩn hóa phân phối và còn giúp làm nổi bật các sự khác biệt nhỏ nhằm tăng khả năng phân loại. def proc_data(df): df["Fare"] = df.Fare.fillna(0) df.fillna(modes, inplace=True) df["LogFare"] = np.log1p(df["Fare"]) df["Embarked"] = pd.Categorical(df.Embarked) df["Sex"] = pd.Categorical(df.Sex) proc_data(df) proc_data(tst_df) # print(df.LogFare) # print(df.Embarked) # Tạo một danh sách các biến liên tục(continuous), phân loại(categorical) và phụ thuộc(dependent). Lưu ý rằng không còn coi Pclass là một biến phân loại. Đó là bởi vì nó có thứ tự (tức là lớp 1, 2 và 3 có thứ tự) và cây quyết định, như chúng ta sẽ thấy, chỉ quan tâm đến thứ tự, không quan tâm đến giá trị tuyệt đối. 
# cats = ["Sex", "Embarked"] conts = ["Age", "SibSp", "Parch", "LogFare", "Pclass"] dep = "Survived" # Ngay cả khi đã tạo cột 'cats' theo phân loại, chúng vẫn được Pandas hiển thị dưới dạng giá trị ban đầu của chúng df.Sex.head() # Tuy nhiên, đằng sau đó, chúng hiện được lưu trữ dưới dạng số nguyên, với các chỉ số được tra cứu trong danh sách 'Category' được hiển thị ở đầu ra ở trên. Chúng ta có thể xem các giá trị được lưu trữ bằng cách xem thuộc tính “cat.codes”: # df.Embarked.cat.codes.head() # ## Chia nhị phân # + Trước khi tạo Random Forest hoặc Gradient Boosting Machine, trước tiên cần tìm hiểu cách tạo Cây quyết định, cái mà cả hai mô hình này được dựa trên. # + Và để tạo cây quyết định, trước tiên chúng ta cần tạo phân chia nhị phân, vì đây là thứ mà cây quyết định được xây dựng từ đó. # + Phân chia nhị phân là nơi tất cả các hàng được đặt vào một trong hai nhóm, dựa trên việc chúng ở trên hay dưới một số ngưỡng của một số cột. Ví dụ: có thể chia các hàng trong tập dữ liệu thành nam và nữ bằng cách sử dụng ngưỡng 0,5 và cột Giới tính (vì các giá trị trong cột là 0 cho nữ và 1 cho nam). Có thể sử dụng một biểu đồ để xem điều đó sẽ phân chia dữ liệu như thế nào. Sử dụng thư viện Seaborn, là một lớp nằm trên matplotlib giúp tạo một số biểu đồ để có thể quan sát dễ dàng hơn # import seaborn as sns fig, axs = plt.subplots(1, 2, figsize=(11, 5)) sns.barplot(data=df, y=dep, x="Sex", ax=axs[0]).set(title="Survival rate") sns.countplot(data=df, x="Sex", ax=axs[1]).set(title="Histogram") # + Ở đây, có thể thấy rằng (ở bên trái) nếu ta chia dữ liệu thành nam và nữ, ta sẽ có các nhóm có tỷ lệ sống sót rất khác nhau: >70% đối với nữ và <20% đối với nam. Ta cũng có thể thấy (ở bên phải) rằng sự phân chia sẽ khá đồng đều, với hơn 300 hành khách (trong số khoảng 900) trong mỗi nhóm. # + Có thể tạo ra một "model" rất đơn giản nói rằng tất cả nữ đều sống sót và không nam nào sống sót. Để làm như vậy, trước tiên nên chia dữ liệu thành một tập train và validation: # from numpy import random from sklearn.model_selection import train_test_split random.seed(42) trn_df, val_df = train_test_split(df, test_size=0.25) trn_df[cats] = trn_df[cats].apply(lambda x: x.cat.codes) val_df[cats] = val_df[cats].apply(lambda x: x.cat.codes) # (Trong bước trước, cũng đã thay thế các biến phân loại bằng mã số nguyên của chúng, vì một số mô hình xây dựng ở dưới yêu cầu điều đó.) # - Bây giờ có thể tạo các biến độc lập (biến x) và phụ thuộc (biến y): # (xs là giá trị các biến của bản ghi (ngoài survived) nếu dep(survived) có trong df thì return ra y( xử lý tránh missing data) ) def xs_y(df): xs = df[cats + conts].copy() return xs, df[dep] if dep in df else None trn_xs, trn_y = xs_y(trn_df) val_xs, val_y = xs_y(val_df) # print(val_xs, val_y) # Hàm kiểm tra giá trị cột Sex trong 'val_xs' nếu giá trị cột Sex bằng 0 thì trả về True và ngược lại: preds = val_xs.Sex == 0 # print(preds) # Sử dụng sai số tuyệt đối trung bình để tính giá trị trung bình của độ lỗi tuyệt đối giữa hai mảng 'val_y' và 'preds' qua đó đánh giá mức độ tốt của mô hình này from sklearn.metrics import mean_absolute_error mean_absolute_error(val_y, preds) # Ngoài ra, có thể thử tách trên một cột liên tục. 
Sử dụng một biểu đồ khác để xem biểu đồ này có thể hoạt động như thế nào -- đây là một ví dụ về có thể xem ở “LogFare”: # df_fare = trn_df[trn_df.LogFare > 0] fig, axs = plt.subplots(1, 2, figsize=(11, 5)) sns.boxenplot(data=df_fare, x=dep, y="LogFare", ax=axs[0]) sns.kdeplot(data=df_fare, x="LogFare", ax=axs[1]) # + Boxenplot ở trên hiển thị các lượng tử LogFare cho mỗi nhóm Survived==0 và Survived==1. # Nó cho thấy rằng LogFare trung bình cho những hành khách không qua khỏi là khoảng 2,5 và # cho những hành khách đã sống sót là khoảng 3,2. Vì vậy, có vẻ như những người trả nhiều # tiền hơn cho vé của họ có nhiều khả năng được đưa lên xuồng cứu sinh hơn. # + Lấy ngưỡng cho 'LogFare' là 2.7 nếu 'LogFare'>2.7 thì sẽ được gán giá trị True và ngược lại sẽ có giá trị False: preds = val_xs.LogFare > 2.7 # print(preds) # Sử dụng sai số tuyệt đối trung bình để tính giá trị trung bình của độ lỗi tuyệt đối giữa hai mảng 'val_y' và 'preds' qua đó đánh giá mức độ tốt của mô hình này: mean_absolute_error(val_y, preds) # Mô hình này kém chính xác hơn một chút so với mô hình của chúng ta đã sử dụng “Sex” làm phân chia nhị phân. # + Lý tưởng nhất là thử nhiều cột và breakpoint một cách dễ dàng hơn. Có thể tạo một hàm trả về mức độ tốt của mô hình, để thử nhanh hơn một vài cách tách khác nhau. Ta sẽ tạo một hàm 'score' để làm điều này. Thay vì trả về sai số tuyệt đối trung bình, taa sẽ tính toán thước đo độ không thuần khiết -- nghĩa là mức độ phân chia nhị phân tạo ra hai nhóm trong đó các hàng trong một nhóm tương tự nhau hoặc không giống nhau. # + Ta có thể đo mức độ giống nhau của các hàng trong một nhóm bằng cách lấy độ lệch chuẩn của biến phụ thuộc. Nếu nó cao hơn, thì điều đó có nghĩa là các hàng khác nhau nhiều hơn. Sau đó, ta sẽ nhân số này với số lượng hàng, vì nhóm lớn hơn có tác động nhiều hơn nhóm nhỏ hơn: # Hàm này sẽ giúp chúng ta tính độ lệch chuẩn của một mảng sử dụng hàm 'std()' def _side_score(side, y): tot = side.sum() if tot <= 1: return 0 return y[side].std() * tot # Hàm 'score' sẽ giá trị trung bình của độ lệch chuẩn của 2 mảng được tạo ra sao khi chia ngưỡng. Và kết quả cuối cùng sẽ chia cho độ dài của mảng: def score(col, y, split): lhs = col <= split return (_side_score(lhs, y) + _side_score(~lhs, y)) / len(y) # Tính điểm cho 'Sex': score(trn_xs["Sex"], trn_y, 0.5) # Và tính cho `LogFare`: score(trn_xs["LogFare"], trn_y, 2.7) # Giống với kết quả khi tính bằng sai số tuyệt đối, độ lệch chuẩn của 'Sex' nhỏ hơn 'LogFare' nên 'Sex' là mô hình tốt hơn! # Để dễ dàng tìm ra cách phân tách nhị phân tốt nhất, Ta có thể tạo một công cụ tương tác đơn giản (sử dụng trên Kaggle): # def iscore(nm, split): col = trn_xs[nm] return score(col, trn_y, split) from ipywidgets import interact interact(nm=conts, split=15.5)(iscore) # Có thể thử chọn các cột và điểm phân chia khác nhau bằng cách sử dụng menu thả xuống và thanh trượt ở trên ta có thể tìm thấy những phần tách nào làm tăng độ tinh khiết của dữ liệu. interact(nm=cats[0], split=0.5)(iscore) # Mô hình đã hoạt động nhưng để mô hình hoạt động tốt hơn ta sẽ tìm điểm phân chia tốt nhất cho một cột. 
Ví dụ: để tìm điểm phân chia tốt nhất cho độ tuổi, trước tiên ta sẽ cần lập danh sách tất cả các điểm phân chia có thể có (tức là tất cả các giá trị duy nhất của trường đó): và tìm xem chỉ số nào trong số các giá trị đó có điểm số() thấp nhất # nm = "Age" col = trn_xs[nm] unq = col.unique() unq.sort() unq # Sau đó tìm vị trí có điểm score() thấp nhất scores = np.array([score(col, trn_y, o) for o in unq if not np.isnan(o)]) unq[scores.argmin()] # Ta thấy được là đối với cột “Age”, 6 là ngưỡng tối ưu theo tập train def min_col(df, nm): col, y = df[nm], df[dep] unq = col.dropna().unique() scores = np.array([score(col, y, o) for o in unq if not np.isnan(o)]) idx = scores.argmin() return unq[idx], scores[idx] min_col(trn_df, "Age") # Thử với tất cả các cột khác: # Ta có thể thấy cột 'Sex' với ngưỡng là 0 có số điểm tốt nhất so với các cột còn lại, đúng với dự đoán mô hình ban đầu khi ta cho tất cả những bản ghi có giá trị Sex=0(female) là sống sót. cols = cats + conts {o: min_col(trn_df, o) for o in cols} # Ta có, “Sex” <=0 là cách phân chia tốt nhất mà chúng ta có thể sử dụng. Ta vừa tạo lại bộ phân loại OneR được coi là một trong những bộ phân loại hiệu quả nhất trong bộ dữ liệu thế giới thực. Vì nó rất đơn giản và hiệu quả,đây điểm khởi đầu mà chúng ta có thể sử dụng để so sánh các mô hình phức tạp hơn. # # -> Trước đó, chúng ta đã tính đươc·là quy tắc OneR có sai số khoảng 0,215, vì vậy ta sẽ ghi nhớ lại điều này khi thử các mô hình phức tạp hơn. # ## Cây quyết định # + Để cải thiện mô hình OneR (Mô hình quyết định đơn giản) dự đoán chỉ dựa vào giới tính. Ta có thể thực hiện cải thiện cho bộ phân loại OneR, dự đoán khả năng sống sót dựa trên giới tính, bằng cách tạo thêm một phân tách nhị phân cho từng nhóm 'male' và 'female'. Đó là, tìm phân tách tốt nhất cho 'male' và 'female' một lần nữa. Để làm điều này, ta chỉ cần lặp lại các bước ở phần trước, một lần cho 'male' và một lần cho 'female'. # + Đầu tiên, ta sẽ loại bỏ thuộc tính Sex (giới tính) ra khỏi danh sách các phân tách có thể (vì ta đã sử dụng nó và chỉ có một phân tách có thể cho cột nhị phân đó), và tạo hai nhóm 'male' và 'female': cols.remove("Sex") ismale = trn_df.Sex == 1 males, females = trn_df[ismale], trn_df[~ismale] # Tìm phân chia nhị phân đơn lẻ tốt nhất cho male: {o: min_col(males, o) for o in cols} # và cho females: {o: min_col(females, o) for o in cols} # Ta đã tìm ra được quy tắc phân tách tốt nhất tiếp theo cho hai nhóm giới tính nam và nữ. Đối với nhóm nam, quy tắc phân tách tốt nhất là "Age <= 6", còn đối với nhóm nữ, quy tắc phân tách tốt nhất là "Pclass<=2" # Sau khi thêm các quy tắc này vào, chúng ta đã tạo ra một cây quyết định, trong đó mô hình của chúng ta sẽ đầu tiên kiểm tra xem giới tính là nam hay nữ, và từ đó tùy thuộc vào kết quả sẽ kiểm tra các quy tắc Age hoặc Pclass được đề cập ở trên. Chúng ta có thể lặp lại quá trình này, tạo ra các quy tắc mới cho mỗi trong số bốn nhóm mà chúng ta đã tạo ra. from sklearn.tree import DecisionTreeClassifier, export_graphviz m = DecisionTreeClassifier(max_leaf_nodes=4).fit(trn_xs, trn_y) # Tiếp theo sử dụng lớp DecisionTreeClassifier của thư viện sklearn để tự động tạo cây quyết định. Chúng ta đưa dữ liệu huấn luyện trn_xs và trn_y vào lớp này để học mô hình. max_leaf_nodes=4 là tham số xác định số lượng lá tối đa của cây. Sau khi mô hình được học, chúng ta sử dụng hàm draw_tree để vẽ cây quyết định dưới dạng đồ thị. 
# import graphviz def draw_tree(t, df, size=10, ratio=0.6, precision=2, **kwargs): s = export_graphviz( t, out_file=None, feature_names=df.columns, filled=True, rounded=True, special_characters=True, rotate=False, precision=precision, **kwargs, ) return graphviz.Source(re.sub("Tree {", f"Tree {{ size={size}; ratio={ratio}", s)) draw_tree(m, trn_xs, size=10) # Ở đây, chúng ta thấy rằng DecisionTreeClassifier tìm ra các phân chia giống hệt như những gì chúng ta đã làm thủ công trước đó. Trong hình ảnh trên, các nút màu cam có tỷ lệ sống sót thấp hơn và màu xanh có tỷ lệ sống sót cao hơn. Mỗi nút cho biết có bao nhiêu hàng ("samples") khớp với tập luật đó và hiển thị số lượng hành khách sống sót hoặc thiệt mạng ("values"). Ngoài ra, còn có một thứ gọi là "gini". Đây là một chỉ số đo lường độ không thuần khiết, tương tự như score() mà chúng ta đã tạo ra trước đó. Gini được tính toán như sau: # def gini(cond): act = df.loc[cond, dep] return 1 - act.mean() ** 2 - (1 - act).mean() ** 2 # Công thức này tính toán khả năng rằng: nếu mà chọn hai hàng từ một nhóm, thì sẽ nhận được kết quả Survived giống nhau mỗi lần. Nếu nhóm đó là giống nhau, xác suất sẽ là 1,0 và 0,0 nếu chúng khác nhau gini(df.Sex == "female"), gini(df.Sex == "male") # So sánh với mô hình OneR: mean_absolute_error(val_y, m.predict(val_xs)) # Mô hình này cho kết quả không tốt bằng. Vì đây là một tập dữ liệu nhỏ (Chỉ có khoảng 200 hàng trong tập dữ liệu) nên sự khác biệt nhỏ này không thực sự có ý nghĩa. Có thể kết quả sẽ tốt hơn nếu tạo một cây lớn hơn: m = DecisionTreeClassifier(min_samples_leaf=50) m.fit(trn_xs, trn_y) draw_tree(m, trn_xs, size=12) mean_absolute_error(val_y, m.predict(val_xs)) # Đã có sự cải tiến nhưng với những bộ dữ liệu nhỏ như vậy vẫn khó có được kết quả cao. # ## The random forest # Mục đích: Muốn các dự đoán của mỗi mô hình trong bộ tổ hợp không tương quan với các mô hình khác.Một cách chúng ta có thể tạo ra một loạt các mô hình không tương quan là huấn luyện từng mô hình trên một tập con ngẫu nhiên khác nhau của dữ liệu. Để có thể tạo ra một cây trên một tập con ngẫu nhiên của dữ liệu. # Định nghĩa một hàm có tên là "get_tree" để tạo ra một cây quyết định trên một tập con ngẫu nhiên của dữ liệu. Hàm này có một đối số là "prop" (tỷ lệ) mặc định là 0.75. # + Đầu tiên, hàm này sẽ lấy độ dài của tập huấn luyện ("trn_y") và lưu vào biến "n". # + Tiếp theo, nó tạo một mảng chỉ mục ngẫu nhiên từ 0 đến "n" với kích thước bằng "n*prop". Điều này đảm bảo rằng chúng ta chỉ chọn một tỷ lệ "prop" (0.75 theo mặc định) các mẫu từ tập huấn luyện. # + Cuối cùng, hàm trả về một cây quyết định được tạo ra bằng cách sử dụng lớp "DecisionTreeClassifier" từ thư viện sklearn. Cây được huấn luyện trên tập dữ liệu con ngẫu nhiên được chọn bằng cách chỉ định chỉ mục "idxs" cho dữ liệu huấn luyện và nhãn tương ứng. # + Trong đó: # - min_samples_leaf=5: là số lượng mẫu tối thiểu cần có ở mỗi lá của cây. Nếu số lượng mẫu ở lá nhỏ hơn giá trị này thì sẽ không tiếp tục phân chia nữa và dừng lại. # - trn_xs.iloc[idxs], trn_y.iloc[idxs]: là tập dữ liệu huấn luyện được lấy ra ngẫu nhiên với tỷ lệ prop=0.75 (75%) từ tập dữ liệu huấn luyện ban đầu. 
# ->Trong hàm này, các cây quyết định được tạo ra sẽ có ít nhất 5 mẫu lá (min_samples_leaf=5) và sẽ được huấn luyện trên một tập con ngẫu nhiên của dữ liệu huấn luyện # def get_tree(prop=0.75): n = len(trn_y) idxs = random.choice(n, int(n * prop)) return DecisionTreeClassifier(min_samples_leaf=5).fit( trn_xs.iloc[idxs], trn_y.iloc[idxs] ) # + Trong vòng lặp sử dụng hàm “get_tree()” để tạo ra một danh sách gồm có 100 cây quyết định. Trong mỗi vòng lặp, một cây quyết định mới được tạo ra bằng cách gọi hàm "get_tree()" và được thêm vào danh sách cây "trees" # ->Từ đó ta sẽ có một danh sách “trees” trong đó có 100 cây quyết định không tương quan với nhau, và mỗi cây đều được huấn luyện trên một tập con ngẫu nhiên khác nhau của dữ liệu huấn luyện ban đầu. Mục đích của việc tạo ra các cây này là để sau đó lấy trung bình dự đoán của chúng và tạo thành một bộ tổ hợp các mô hình (ensemble model). # trees = [get_tree() for t in range(100)] # + Tạo một danh sách “all_probs” để chứa các dự đoán của tất cả các cây trong bộ tổ hợp trên tập dữ liệu xác thực. Mỗi một cây trong danh sách “trees” đều được dùng để dự đoán đầu ra trên tập dữ liệu xác thực (“val_xs”) và kết quả dự đoán của mỗi cây sẽ thêm vào danh sách “all_prob” # + Tạo danh sách “avg_probs” trong đó sẽ sử dụng hàm “np.stack()” để chuyển đổi danh sách thành một mảng numpy mảng này sẽ có kích thước (số lượng cây, số lượng mẫu xác thực” và chứa các giá trị dự đoán từ tất cả các cây. Sau đó sử dụng phương thức mean của numpy để tính toán giá trị trung bình của các giá trị trong all_probs và kết quả sẽ lưu lại tại “avg_probs”. # + Cuối cùng thì ta sẽ sử dụng phương thức mean_absolute_error để tính toán sai số trung bình tuyệt đối giữa dự đoán trung bình(avg_probs) và nhãn thực tế trên tập dữ liệu xác thực (“val_y”). # =>Từ đó ta sẽ đánh giá được hiệu suất của mô hình cây quyết định bằng cách tính toán trung bình chênh lệch tuyệt đối giữa dự đoán trung bình và giá trị thực tế của mẫu xác thực. # all_probs = [t.predict(val_xs) for t in trees] avg_probs = np.stack(all_probs).mean(0) mean_absolute_error(val_y, avg_probs) # Ta thấy kết quả được tính bởi thư viện sklearn sử dụng RandomForestClassifier là chính xác hơn, tuy nhiên kết quả của Model ta xây dựng cũng cho kết quả gần chính xác. Phần bổ sung chính trong Random Forest là ngoài việc chọn một mẫu dữ liệu ngẫu nhiên cho mỗi cây, nó cũng chọn một tập hợp con ngẫu nhiên các cột cho mỗi lần phân tách. Đây là cách ta lặp lại quy trình trên với một Random Forest: from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(100, min_samples_leaf=5) rf.fit(trn_xs, trn_y) mean_absolute_error(val_y, rf.predict(val_xs)) # Một tính năng đặc biệt của Ramdom Forest là có thể biết biến độc lập nào là quan trọng nhất trong mô hình, bằng cách sử dụng `feature_importances_`: pd.DataFrame(dict(cols=trn_xs.columns, imp=m.feature_importances_)).plot( "cols", "imp", "barh" )
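The hand-rolled ensemble above averages hard 0/1 predictions from the hundred trees. A small variant, closer to what `RandomForestClassifier` does internally, is to average each tree's predicted class-1 probability instead. This reuses `trees`, `val_xs` and `val_y` from the notebook and is a sketch, not the original author's code.

```python
# Average each tree's predicted probability of survival rather than its hard label.
import numpy as np
from sklearn.metrics import mean_absolute_error

soft_avg = np.stack([t.predict_proba(val_xs)[:, 1] for t in trees]).mean(0)
print(mean_absolute_error(val_y, soft_avg))
```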
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/110/129110749.ipynb
null
null
[{"Id": 129110749, "ScriptId": 38317603, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12469052, "CreationDate": "05/11/2023 05:01:42", "VersionNumber": 4.0, "Title": "Nh\u00f3m 2 - B\u00e1o c\u00e1o Kho v\u00e0 khai ph\u00e1 d\u1eef li\u1ec7u", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 352.0, "LinesInsertedFromPrevious": 94.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 258.0, "LinesInsertedFromFork": 153.0, "LinesDeletedFromFork": 151.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 199.0, "TotalVotes": 0}]
null
null
null
null
# ## Dự đoán khả năng sống sót trên tàu Titanic # Vụ đắm tàu RMS Titanic là một trong những vụ đắm tàu khét tiếng nhất trong lịch sử. Vào ngày 15 tháng 4 năm 1912, trong chuyến hành trình đầu tiên của mình, Titanic đã chìm sau khi va chạm với một tảng băng trôi, khiến 1502 trong số 2224 hành khách và thủy thủ đoàn thiệt mạng. Thảm kịch giật gân này đã gây sốc cho cộng đồng quốc tế và dẫn đến các quy định an toàn tốt hơn cho các con tàu. Một trong những nguyên nhân khiến vụ đắm tàu dẫn đến thiệt hại về người như vậy là do không có đủ xuồng cứu sinh cho hành khách và thủy thủ đoàn. Mặc dù có một số yếu tố may mắn liên quan đến việc sống sót sau vụ chìm tàu, nhưng một số nhóm người có nhiều khả năng sống sót hơn những nhóm khác, chẳng hạn như phụ nữ, trẻ em và tầng lớp thượng lưu. Chúng ta cần phân tích xem những loại người nào có khả năng sống sót. # Mô tả bộ dữ liệu sử dụng: # + Hai bộ dữ liệu tương tự bao gồm thông tin về hành khách như tên, tuổi, giới tính, tầng lớp kinh tế xã hội, v.v. Một bộ dữ liệu có tên là train.csv và bộ dữ liệu kia có tên là test.csv. # + Train.csv sẽ chứa thông tin chi tiết về một nhóm nhỏ hành khách trên tàu (chính xác là 891) và quan trọng là sẽ tiết lộ liệu họ có sống sót hay không, còn được gọi là “Survived”. # + Bộ dữ liệu test.csv chứa thông tin tương tự nhưng không tiết lộ “Survived” về mỗi hành khách. # + Sử dụng các mẫu tìm thấy trong dữ liệu train.csv, dự đoán xem 418 hành khách khác trên tàu (tìm thấy trong test.csv) có sống sót hay không. # Mô tả biến: Có 1309 bản ghi của 12 biến. # + Survived: Sống sót (1) hoặc Không (0) # + Pclass: Hạng hành khách # + Name: Tên hành khách # + Sex: Giới tính của hành khách # + Age: Tuổi của hành khách # + SibSp: Số anh chị em/vợ/chồng trên tàu # + Parch: Số phụ huynh/con cái trên tàu # + Ticket: Số vé # + Fare: Giá vé # + Cabin: cabin # + Embarked: Cảng lên tàu # from fastai.imports import * np.set_printoptions(linewidth=130) # ## Tiền xử lý dữ liệu # Import thư viện và dữ liệu. import os iskaggle = os.environ.get("KAGGLE_KERNEL_RUN_TYPE", "") if iskaggle: path = Path("../input/titanic") else: import zipfile, kaggle path = Path("titanic") kaggle.api.competition_download_cli(str(path)) zipfile.ZipFile(f"{path}.zip").extractall(path) df = pd.read_csv(path / "train.csv") tst_df = pd.read_csv(path / "test.csv") # print(df) # print(tst_df) modes = df.mode().iloc[0] # print(modes) # Với các cột không phải số, chuyển đổi các trường đó thành các biến phân loại, bên trong Pandas tạo danh sách tất cả các giá trị duy nhất trong cột và thay thế từng giá trị bằng một số. Số này chỉ là một chỉ mục để tra cứu giá trị trong danh sách tất cả các giá trị duy nhất. # + df['LogFare'] = np.log1p(df['Fare']): Dòng này tính log(x+1) của cột 'Fare' và gán vào cột mới 'LogFare'. Ta làm việc này để làm giảm độ biến thiên giá trị của cột 'Fare' qua đó giúp chuẩn hóa phân phối và còn giúp làm nổi bật các sự khác biệt nhỏ nhằm tăng khả năng phân loại. def proc_data(df): df["Fare"] = df.Fare.fillna(0) df.fillna(modes, inplace=True) df["LogFare"] = np.log1p(df["Fare"]) df["Embarked"] = pd.Categorical(df.Embarked) df["Sex"] = pd.Categorical(df.Sex) proc_data(df) proc_data(tst_df) # print(df.LogFare) # print(df.Embarked) # Tạo một danh sách các biến liên tục(continuous), phân loại(categorical) và phụ thuộc(dependent). Lưu ý rằng không còn coi Pclass là một biến phân loại. Đó là bởi vì nó có thứ tự (tức là lớp 1, 2 và 3 có thứ tự) và cây quyết định, như chúng ta sẽ thấy, chỉ quan tâm đến thứ tự, không quan tâm đến giá trị tuyệt đối. 
# cats = ["Sex", "Embarked"] conts = ["Age", "SibSp", "Parch", "LogFare", "Pclass"] dep = "Survived" # Ngay cả khi đã tạo cột 'cats' theo phân loại, chúng vẫn được Pandas hiển thị dưới dạng giá trị ban đầu của chúng df.Sex.head() # Tuy nhiên, đằng sau đó, chúng hiện được lưu trữ dưới dạng số nguyên, với các chỉ số được tra cứu trong danh sách 'Category' được hiển thị ở đầu ra ở trên. Chúng ta có thể xem các giá trị được lưu trữ bằng cách xem thuộc tính “cat.codes”: # df.Embarked.cat.codes.head() # ## Chia nhị phân # + Trước khi tạo Random Forest hoặc Gradient Boosting Machine, trước tiên cần tìm hiểu cách tạo Cây quyết định, cái mà cả hai mô hình này được dựa trên. # + Và để tạo cây quyết định, trước tiên chúng ta cần tạo phân chia nhị phân, vì đây là thứ mà cây quyết định được xây dựng từ đó. # + Phân chia nhị phân là nơi tất cả các hàng được đặt vào một trong hai nhóm, dựa trên việc chúng ở trên hay dưới một số ngưỡng của một số cột. Ví dụ: có thể chia các hàng trong tập dữ liệu thành nam và nữ bằng cách sử dụng ngưỡng 0,5 và cột Giới tính (vì các giá trị trong cột là 0 cho nữ và 1 cho nam). Có thể sử dụng một biểu đồ để xem điều đó sẽ phân chia dữ liệu như thế nào. Sử dụng thư viện Seaborn, là một lớp nằm trên matplotlib giúp tạo một số biểu đồ để có thể quan sát dễ dàng hơn # import seaborn as sns fig, axs = plt.subplots(1, 2, figsize=(11, 5)) sns.barplot(data=df, y=dep, x="Sex", ax=axs[0]).set(title="Survival rate") sns.countplot(data=df, x="Sex", ax=axs[1]).set(title="Histogram") # + Ở đây, có thể thấy rằng (ở bên trái) nếu ta chia dữ liệu thành nam và nữ, ta sẽ có các nhóm có tỷ lệ sống sót rất khác nhau: >70% đối với nữ và <20% đối với nam. Ta cũng có thể thấy (ở bên phải) rằng sự phân chia sẽ khá đồng đều, với hơn 300 hành khách (trong số khoảng 900) trong mỗi nhóm. # + Có thể tạo ra một "model" rất đơn giản nói rằng tất cả nữ đều sống sót và không nam nào sống sót. Để làm như vậy, trước tiên nên chia dữ liệu thành một tập train và validation: # from numpy import random from sklearn.model_selection import train_test_split random.seed(42) trn_df, val_df = train_test_split(df, test_size=0.25) trn_df[cats] = trn_df[cats].apply(lambda x: x.cat.codes) val_df[cats] = val_df[cats].apply(lambda x: x.cat.codes) # (Trong bước trước, cũng đã thay thế các biến phân loại bằng mã số nguyên của chúng, vì một số mô hình xây dựng ở dưới yêu cầu điều đó.) # - Bây giờ có thể tạo các biến độc lập (biến x) và phụ thuộc (biến y): # (xs là giá trị các biến của bản ghi (ngoài survived) nếu dep(survived) có trong df thì return ra y( xử lý tránh missing data) ) def xs_y(df): xs = df[cats + conts].copy() return xs, df[dep] if dep in df else None trn_xs, trn_y = xs_y(trn_df) val_xs, val_y = xs_y(val_df) # print(val_xs, val_y) # Hàm kiểm tra giá trị cột Sex trong 'val_xs' nếu giá trị cột Sex bằng 0 thì trả về True và ngược lại: preds = val_xs.Sex == 0 # print(preds) # Sử dụng sai số tuyệt đối trung bình để tính giá trị trung bình của độ lỗi tuyệt đối giữa hai mảng 'val_y' và 'preds' qua đó đánh giá mức độ tốt của mô hình này from sklearn.metrics import mean_absolute_error mean_absolute_error(val_y, preds) # Ngoài ra, có thể thử tách trên một cột liên tục. 
Sử dụng một biểu đồ khác để xem biểu đồ này có thể hoạt động như thế nào -- đây là một ví dụ về có thể xem ở “LogFare”: # df_fare = trn_df[trn_df.LogFare > 0] fig, axs = plt.subplots(1, 2, figsize=(11, 5)) sns.boxenplot(data=df_fare, x=dep, y="LogFare", ax=axs[0]) sns.kdeplot(data=df_fare, x="LogFare", ax=axs[1]) # + Boxenplot ở trên hiển thị các lượng tử LogFare cho mỗi nhóm Survived==0 và Survived==1. # Nó cho thấy rằng LogFare trung bình cho những hành khách không qua khỏi là khoảng 2,5 và # cho những hành khách đã sống sót là khoảng 3,2. Vì vậy, có vẻ như những người trả nhiều # tiền hơn cho vé của họ có nhiều khả năng được đưa lên xuồng cứu sinh hơn. # + Lấy ngưỡng cho 'LogFare' là 2.7 nếu 'LogFare'>2.7 thì sẽ được gán giá trị True và ngược lại sẽ có giá trị False: preds = val_xs.LogFare > 2.7 # print(preds) # Sử dụng sai số tuyệt đối trung bình để tính giá trị trung bình của độ lỗi tuyệt đối giữa hai mảng 'val_y' và 'preds' qua đó đánh giá mức độ tốt của mô hình này: mean_absolute_error(val_y, preds) # Mô hình này kém chính xác hơn một chút so với mô hình của chúng ta đã sử dụng “Sex” làm phân chia nhị phân. # + Lý tưởng nhất là thử nhiều cột và breakpoint một cách dễ dàng hơn. Có thể tạo một hàm trả về mức độ tốt của mô hình, để thử nhanh hơn một vài cách tách khác nhau. Ta sẽ tạo một hàm 'score' để làm điều này. Thay vì trả về sai số tuyệt đối trung bình, taa sẽ tính toán thước đo độ không thuần khiết -- nghĩa là mức độ phân chia nhị phân tạo ra hai nhóm trong đó các hàng trong một nhóm tương tự nhau hoặc không giống nhau. # + Ta có thể đo mức độ giống nhau của các hàng trong một nhóm bằng cách lấy độ lệch chuẩn của biến phụ thuộc. Nếu nó cao hơn, thì điều đó có nghĩa là các hàng khác nhau nhiều hơn. Sau đó, ta sẽ nhân số này với số lượng hàng, vì nhóm lớn hơn có tác động nhiều hơn nhóm nhỏ hơn: # Hàm này sẽ giúp chúng ta tính độ lệch chuẩn của một mảng sử dụng hàm 'std()' def _side_score(side, y): tot = side.sum() if tot <= 1: return 0 return y[side].std() * tot # Hàm 'score' sẽ giá trị trung bình của độ lệch chuẩn của 2 mảng được tạo ra sao khi chia ngưỡng. Và kết quả cuối cùng sẽ chia cho độ dài của mảng: def score(col, y, split): lhs = col <= split return (_side_score(lhs, y) + _side_score(~lhs, y)) / len(y) # Tính điểm cho 'Sex': score(trn_xs["Sex"], trn_y, 0.5) # Và tính cho `LogFare`: score(trn_xs["LogFare"], trn_y, 2.7) # Giống với kết quả khi tính bằng sai số tuyệt đối, độ lệch chuẩn của 'Sex' nhỏ hơn 'LogFare' nên 'Sex' là mô hình tốt hơn! # Để dễ dàng tìm ra cách phân tách nhị phân tốt nhất, Ta có thể tạo một công cụ tương tác đơn giản (sử dụng trên Kaggle): # def iscore(nm, split): col = trn_xs[nm] return score(col, trn_y, split) from ipywidgets import interact interact(nm=conts, split=15.5)(iscore) # Có thể thử chọn các cột và điểm phân chia khác nhau bằng cách sử dụng menu thả xuống và thanh trượt ở trên ta có thể tìm thấy những phần tách nào làm tăng độ tinh khiết của dữ liệu. interact(nm=cats[0], split=0.5)(iscore) # Mô hình đã hoạt động nhưng để mô hình hoạt động tốt hơn ta sẽ tìm điểm phân chia tốt nhất cho một cột. 
Ví dụ: để tìm điểm phân chia tốt nhất cho độ tuổi, trước tiên ta sẽ cần lập danh sách tất cả các điểm phân chia có thể có (tức là tất cả các giá trị duy nhất của trường đó): và tìm xem chỉ số nào trong số các giá trị đó có điểm số() thấp nhất # nm = "Age" col = trn_xs[nm] unq = col.unique() unq.sort() unq # Sau đó tìm vị trí có điểm score() thấp nhất scores = np.array([score(col, trn_y, o) for o in unq if not np.isnan(o)]) unq[scores.argmin()] # Ta thấy được là đối với cột “Age”, 6 là ngưỡng tối ưu theo tập train def min_col(df, nm): col, y = df[nm], df[dep] unq = col.dropna().unique() scores = np.array([score(col, y, o) for o in unq if not np.isnan(o)]) idx = scores.argmin() return unq[idx], scores[idx] min_col(trn_df, "Age") # Thử với tất cả các cột khác: # Ta có thể thấy cột 'Sex' với ngưỡng là 0 có số điểm tốt nhất so với các cột còn lại, đúng với dự đoán mô hình ban đầu khi ta cho tất cả những bản ghi có giá trị Sex=0(female) là sống sót. cols = cats + conts {o: min_col(trn_df, o) for o in cols} # Ta có, “Sex” <=0 là cách phân chia tốt nhất mà chúng ta có thể sử dụng. Ta vừa tạo lại bộ phân loại OneR được coi là một trong những bộ phân loại hiệu quả nhất trong bộ dữ liệu thế giới thực. Vì nó rất đơn giản và hiệu quả,đây điểm khởi đầu mà chúng ta có thể sử dụng để so sánh các mô hình phức tạp hơn. # # -> Trước đó, chúng ta đã tính đươc·là quy tắc OneR có sai số khoảng 0,215, vì vậy ta sẽ ghi nhớ lại điều này khi thử các mô hình phức tạp hơn. # ## Cây quyết định # + Để cải thiện mô hình OneR (Mô hình quyết định đơn giản) dự đoán chỉ dựa vào giới tính. Ta có thể thực hiện cải thiện cho bộ phân loại OneR, dự đoán khả năng sống sót dựa trên giới tính, bằng cách tạo thêm một phân tách nhị phân cho từng nhóm 'male' và 'female'. Đó là, tìm phân tách tốt nhất cho 'male' và 'female' một lần nữa. Để làm điều này, ta chỉ cần lặp lại các bước ở phần trước, một lần cho 'male' và một lần cho 'female'. # + Đầu tiên, ta sẽ loại bỏ thuộc tính Sex (giới tính) ra khỏi danh sách các phân tách có thể (vì ta đã sử dụng nó và chỉ có một phân tách có thể cho cột nhị phân đó), và tạo hai nhóm 'male' và 'female': cols.remove("Sex") ismale = trn_df.Sex == 1 males, females = trn_df[ismale], trn_df[~ismale] # Tìm phân chia nhị phân đơn lẻ tốt nhất cho male: {o: min_col(males, o) for o in cols} # và cho females: {o: min_col(females, o) for o in cols} # Ta đã tìm ra được quy tắc phân tách tốt nhất tiếp theo cho hai nhóm giới tính nam và nữ. Đối với nhóm nam, quy tắc phân tách tốt nhất là "Age <= 6", còn đối với nhóm nữ, quy tắc phân tách tốt nhất là "Pclass<=2" # Sau khi thêm các quy tắc này vào, chúng ta đã tạo ra một cây quyết định, trong đó mô hình của chúng ta sẽ đầu tiên kiểm tra xem giới tính là nam hay nữ, và từ đó tùy thuộc vào kết quả sẽ kiểm tra các quy tắc Age hoặc Pclass được đề cập ở trên. Chúng ta có thể lặp lại quá trình này, tạo ra các quy tắc mới cho mỗi trong số bốn nhóm mà chúng ta đã tạo ra. from sklearn.tree import DecisionTreeClassifier, export_graphviz m = DecisionTreeClassifier(max_leaf_nodes=4).fit(trn_xs, trn_y) # Tiếp theo sử dụng lớp DecisionTreeClassifier của thư viện sklearn để tự động tạo cây quyết định. Chúng ta đưa dữ liệu huấn luyện trn_xs và trn_y vào lớp này để học mô hình. max_leaf_nodes=4 là tham số xác định số lượng lá tối đa của cây. Sau khi mô hình được học, chúng ta sử dụng hàm draw_tree để vẽ cây quyết định dưới dạng đồ thị. 
# import graphviz def draw_tree(t, df, size=10, ratio=0.6, precision=2, **kwargs): s = export_graphviz( t, out_file=None, feature_names=df.columns, filled=True, rounded=True, special_characters=True, rotate=False, precision=precision, **kwargs, ) return graphviz.Source(re.sub("Tree {", f"Tree {{ size={size}; ratio={ratio}", s)) draw_tree(m, trn_xs, size=10) # Ở đây, chúng ta thấy rằng DecisionTreeClassifier tìm ra các phân chia giống hệt như những gì chúng ta đã làm thủ công trước đó. Trong hình ảnh trên, các nút màu cam có tỷ lệ sống sót thấp hơn và màu xanh có tỷ lệ sống sót cao hơn. Mỗi nút cho biết có bao nhiêu hàng ("samples") khớp với tập luật đó và hiển thị số lượng hành khách sống sót hoặc thiệt mạng ("values"). Ngoài ra, còn có một thứ gọi là "gini". Đây là một chỉ số đo lường độ không thuần khiết, tương tự như score() mà chúng ta đã tạo ra trước đó. Gini được tính toán như sau: # def gini(cond): act = df.loc[cond, dep] return 1 - act.mean() ** 2 - (1 - act).mean() ** 2 # Công thức này tính toán khả năng rằng: nếu mà chọn hai hàng từ một nhóm, thì sẽ nhận được kết quả Survived giống nhau mỗi lần. Nếu nhóm đó là giống nhau, xác suất sẽ là 1,0 và 0,0 nếu chúng khác nhau gini(df.Sex == "female"), gini(df.Sex == "male") # So sánh với mô hình OneR: mean_absolute_error(val_y, m.predict(val_xs)) # Mô hình này cho kết quả không tốt bằng. Vì đây là một tập dữ liệu nhỏ (Chỉ có khoảng 200 hàng trong tập dữ liệu) nên sự khác biệt nhỏ này không thực sự có ý nghĩa. Có thể kết quả sẽ tốt hơn nếu tạo một cây lớn hơn: m = DecisionTreeClassifier(min_samples_leaf=50) m.fit(trn_xs, trn_y) draw_tree(m, trn_xs, size=12) mean_absolute_error(val_y, m.predict(val_xs)) # Đã có sự cải tiến nhưng với những bộ dữ liệu nhỏ như vậy vẫn khó có được kết quả cao. # ## The random forest # Mục đích: Muốn các dự đoán của mỗi mô hình trong bộ tổ hợp không tương quan với các mô hình khác.Một cách chúng ta có thể tạo ra một loạt các mô hình không tương quan là huấn luyện từng mô hình trên một tập con ngẫu nhiên khác nhau của dữ liệu. Để có thể tạo ra một cây trên một tập con ngẫu nhiên của dữ liệu. # Định nghĩa một hàm có tên là "get_tree" để tạo ra một cây quyết định trên một tập con ngẫu nhiên của dữ liệu. Hàm này có một đối số là "prop" (tỷ lệ) mặc định là 0.75. # + Đầu tiên, hàm này sẽ lấy độ dài của tập huấn luyện ("trn_y") và lưu vào biến "n". # + Tiếp theo, nó tạo một mảng chỉ mục ngẫu nhiên từ 0 đến "n" với kích thước bằng "n*prop". Điều này đảm bảo rằng chúng ta chỉ chọn một tỷ lệ "prop" (0.75 theo mặc định) các mẫu từ tập huấn luyện. # + Cuối cùng, hàm trả về một cây quyết định được tạo ra bằng cách sử dụng lớp "DecisionTreeClassifier" từ thư viện sklearn. Cây được huấn luyện trên tập dữ liệu con ngẫu nhiên được chọn bằng cách chỉ định chỉ mục "idxs" cho dữ liệu huấn luyện và nhãn tương ứng. # + Trong đó: # - min_samples_leaf=5: là số lượng mẫu tối thiểu cần có ở mỗi lá của cây. Nếu số lượng mẫu ở lá nhỏ hơn giá trị này thì sẽ không tiếp tục phân chia nữa và dừng lại. # - trn_xs.iloc[idxs], trn_y.iloc[idxs]: là tập dữ liệu huấn luyện được lấy ra ngẫu nhiên với tỷ lệ prop=0.75 (75%) từ tập dữ liệu huấn luyện ban đầu. 
# ->Trong hàm này, các cây quyết định được tạo ra sẽ có ít nhất 5 mẫu lá (min_samples_leaf=5) và sẽ được huấn luyện trên một tập con ngẫu nhiên của dữ liệu huấn luyện # def get_tree(prop=0.75): n = len(trn_y) idxs = random.choice(n, int(n * prop)) return DecisionTreeClassifier(min_samples_leaf=5).fit( trn_xs.iloc[idxs], trn_y.iloc[idxs] ) # + Trong vòng lặp sử dụng hàm “get_tree()” để tạo ra một danh sách gồm có 100 cây quyết định. Trong mỗi vòng lặp, một cây quyết định mới được tạo ra bằng cách gọi hàm "get_tree()" và được thêm vào danh sách cây "trees" # ->Từ đó ta sẽ có một danh sách “trees” trong đó có 100 cây quyết định không tương quan với nhau, và mỗi cây đều được huấn luyện trên một tập con ngẫu nhiên khác nhau của dữ liệu huấn luyện ban đầu. Mục đích của việc tạo ra các cây này là để sau đó lấy trung bình dự đoán của chúng và tạo thành một bộ tổ hợp các mô hình (ensemble model). # trees = [get_tree() for t in range(100)] # + Tạo một danh sách “all_probs” để chứa các dự đoán của tất cả các cây trong bộ tổ hợp trên tập dữ liệu xác thực. Mỗi một cây trong danh sách “trees” đều được dùng để dự đoán đầu ra trên tập dữ liệu xác thực (“val_xs”) và kết quả dự đoán của mỗi cây sẽ thêm vào danh sách “all_prob” # + Tạo danh sách “avg_probs” trong đó sẽ sử dụng hàm “np.stack()” để chuyển đổi danh sách thành một mảng numpy mảng này sẽ có kích thước (số lượng cây, số lượng mẫu xác thực” và chứa các giá trị dự đoán từ tất cả các cây. Sau đó sử dụng phương thức mean của numpy để tính toán giá trị trung bình của các giá trị trong all_probs và kết quả sẽ lưu lại tại “avg_probs”. # + Cuối cùng thì ta sẽ sử dụng phương thức mean_absolute_error để tính toán sai số trung bình tuyệt đối giữa dự đoán trung bình(avg_probs) và nhãn thực tế trên tập dữ liệu xác thực (“val_y”). # =>Từ đó ta sẽ đánh giá được hiệu suất của mô hình cây quyết định bằng cách tính toán trung bình chênh lệch tuyệt đối giữa dự đoán trung bình và giá trị thực tế của mẫu xác thực. # all_probs = [t.predict(val_xs) for t in trees] avg_probs = np.stack(all_probs).mean(0) mean_absolute_error(val_y, avg_probs) # Ta thấy kết quả được tính bởi thư viện sklearn sử dụng RandomForestClassifier là chính xác hơn, tuy nhiên kết quả của Model ta xây dựng cũng cho kết quả gần chính xác. Phần bổ sung chính trong Random Forest là ngoài việc chọn một mẫu dữ liệu ngẫu nhiên cho mỗi cây, nó cũng chọn một tập hợp con ngẫu nhiên các cột cho mỗi lần phân tách. Đây là cách ta lặp lại quy trình trên với một Random Forest: from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(100, min_samples_leaf=5) rf.fit(trn_xs, trn_y) mean_absolute_error(val_y, rf.predict(val_xs)) # Một tính năng đặc biệt của Ramdom Forest là có thể biết biến độc lập nào là quan trọng nhất trong mô hình, bằng cách sử dụng `feature_importances_`: pd.DataFrame(dict(cols=trn_xs.columns, imp=m.feature_importances_)).plot( "cols", "imp", "barh" )
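One detail to note: the final cell plots `m.feature_importances_`, where `m` is the decision tree fitted earlier, while the surrounding text talks about the random forest. A version using the fitted `rf` is sketched below; the sorting is an addition for readability.

```python
# Feature importances of the fitted random forest, sorted before plotting.
import pandas as pd

rf_imp = pd.DataFrame(dict(cols=trn_xs.columns, imp=rf.feature_importances_))
rf_imp.sort_values("imp").plot("cols", "imp", "barh")
```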
false
0
9,448
0
9,448
9,448
129167855
# # Cross-Encoder
# ![ce](https://weaviate.io/assets/images/cross-encoder-61b340b8d1f6359bb7650da2c59be11c.png)
# ## What is a cross-encoder?
# A [Cross-Encoder](https://arxiv.org/abs/1908.10084) is a sentence-pair classifier. It takes a pair of texts as input and scores their similarity with an index between 0 and 1. Unlike a [Bi-Encoder](https://arxiv.org/abs/1908.10084), it does not compute sentence embeddings.
# ## Difference between bi-encoder and cross-encoder
# A **Bi-Encoder** takes one sentence at a time as input and computes its embedding. To measure similarity between sentences, a similarity measure such as [cosine similarity](https://it.wikipedia.org/wiki/Coseno_di_similitudine) or the [dot product](https://it.wikipedia.org/wiki/Prodotto_scalare) must then be computed on these embeddings.
# A **Cross-Encoder** takes a pair of sentences as input simultaneously; it does not compute their embeddings but scores them with a similarity index between 0 and 1.
# ![bicross](https://raw.githubusercontent.com/UKPLab/sentence-transformers/master/docs/img/Bi_vs_Cross-Encoder.png)
# > N.B. if you want to use the dot product, make sure the embeddings are normalized.
# ## Training a Cross-Encoder using [SBERT](https://www.sbert.net/)
# Below is an example of building a Cross-Encoder for Italian with the sentence-transformers library. As with Bi-Encoders, there are several ways to do this depending on the chosen dataset; here an STS (Semantic Textual Similarity) benchmark-format dataset will again be used.
# ### Dataset
# After downloading the [dataset from the Huggingface Hub](https://huggingface.co/datasets/stsb_multi_mt/viewer/it/train), the sentence pairs must be prepared for the model.
# The process is very similar to the Bi-Encoder one, but here the training pairs must be inserted in both combinations, i.e.:
# * ([sentence1, sentence2], sim)
# * ([sentence2, sentence1], sim)
# because the score must be symmetric.
# This is not needed for the validation and/or test dataset.
# > For more info on the other techniques, see the [examples](https://www.sbert.net/examples/training/cross-encoder/README.html#examples).
from datasets import load_dataset
from sentence_transformers import InputExample
from torch.utils.data import DataLoader

dataset_train = load_dataset("stsb_multi_mt", name="it", split="train")
dataset_test = load_dataset("stsb_multi_mt", name="it", split="test")
gold_samples = []
batch_size = 16
for df in dataset_train:
    score = float(df["similarity_score"]) / 5.0
    gold_samples.append(
        InputExample(texts=[df["sentence1"], df["sentence2"]], label=score)
    )
    gold_samples.append(
        InputExample(texts=[df["sentence2"], df["sentence1"]], label=score)
    )
train_dataloader = DataLoader(gold_samples, shuffle=True, batch_size=batch_size)

from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
import math

# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator(
    [[x["sentence1"], x["sentence2"]] for x in dataset_test],
    [x / 5.0 for x in dataset_test["similarity_score"]],
)

# ## Training the model
# Starting again from an [Italian BERT](https://huggingface.co/dbmdz/bert-base-italian-uncased), we create our cross-encoder, initializing the head with the number of labels to predict.
# > In our case, an STS model, we have a single label, namely the score. Had we had an *NLI-format dataset ("contradiction", "entailment", "neutral")*, we would have specified the number of labels present.
# Once the number of training epochs and the warmup steps are chosen, we can start training.
from sentence_transformers.cross_encoder import CrossEncoder

model_checkpoint = "dbmdz/bert-base-italian-uncased"
cross_encoder = CrossEncoder(model_checkpoint, num_labels=1)
num_epochs = 4
evaluation_steps = 500
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)  # 10% of train data
cross_encoder.fit(
    train_dataloader=train_dataloader,
    evaluator=evaluator,
    epochs=num_epochs,
    evaluation_steps=evaluation_steps,
    warmup_steps=warmup_steps,
    save_best_model=True,
    output_path="cross-encoder-italian-bert-stsb/",
)

# ### Evaluation
# Cross-Encoders usually perform better at inference than Bi-Encoders; in this case we did not focus on the metric, and in fact the roughly 81% accuracy is not very high for this model.
evaluator(cross_encoder)
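# A minimal optional inference sketch, assuming training above completed and the best model was saved to the output_path used there ("cross-encoder-italian-bert-stsb/"): CrossEncoder.predict returns one similarity score per sentence pair, here roughly in the 0-1 range because num_labels=1.
from sentence_transformers.cross_encoder import CrossEncoder

# assumes the output_path used during training above
reloaded = CrossEncoder("cross-encoder-italian-bert-stsb/")
pairs = [  # hypothetical Italian sentence pairs, for illustration only
    ["Una donna suona la chitarra.", "Una persona suona uno strumento."],
    ["Un gatto dorme sul divano.", "Due uomini giocano a calcio."],
]
scores = reloaded.predict(pairs)  # one similarity score per pair
print(scores)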
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/167/129167855.ipynb
null
null
[{"Id": 129167855, "ScriptId": 37694657, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3451576, "CreationDate": "05/11/2023 13:47:19", "VersionNumber": 8.0, "Title": "sentence_transformer_cross_encoder", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 138.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 138.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,448
0
1,448
1,448
129167277
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns diabetes = pd.read_csv("/kaggle/input/diabetes-dataset/diabetes.csv") diabetes.head() diabetes.isnull().sum() diabetes.info() xcols = diabetes.columns xcols for x in xcols: if diabetes[x].dtypes == "int64" or diabetes[x].dtypes == "float64": plt.hist(diabetes[x]) plt.xlabel(x) plt.ylabel("count") plt.show() x = diabetes.iloc[:, :-1] x y = diabetes.iloc[:, -1] y from sklearn.model_selection import train_test_split xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.30) xtrain.shape, xtest.shape, ytrain.shape, ytest.shape from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV dec = DecisionTreeClassifier(random_state=15) params = {"min_samples_leaf": [1, 2, 3, 4], "max_depth": [1, 2, 3]} grid = GridSearchCV(estimator=dec, param_grid=params, cv=3, scoring="roc_auc") grid.fit(xtrain, ytrain) grid.best_score_ grid.best_estimator_ grid.best_params_ prediction = grid.predict(xtest) from sklearn.metrics import accuracy_score print(accuracy_score(ytest, prediction)) from sklearn.metrics import confusion_matrix print(confusion_matrix(ytest, prediction)) from sklearn import tree tree.plot_tree(grid.best_estimator_, fontsize=6)
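# A minimal optional sketch, assuming `grid` has been fitted as above: GridSearchCV stores per-candidate results in `cv_results_`, which can be viewed as a DataFrame sorted by mean cross-validated ROC AUC.
import pandas as pd

# assumes the fitted `grid` object from the cell above
cv_results = pd.DataFrame(grid.cv_results_)
cols = ["param_max_depth", "param_min_samples_leaf", "mean_test_score", "std_test_score"]
print(cv_results[cols].sort_values("mean_test_score", ascending=False).head())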
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/167/129167277.ipynb
null
null
[{"Id": 129167277, "ScriptId": 38400396, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13314142, "CreationDate": "05/11/2023 13:42:29", "VersionNumber": 1.0, "Title": "Diabetes Dataset - Decision Tree Classifier", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
448
0
448
448
129167896
# # MPG Prediction Model # ## (Regression problem) # ## A 3 layer prediction model for the MPG in the **Auto MPG Data Set** provided by the **UCI Machine Learning Repository**. # #### # ### **First Layer**: A Normalizer layer for the five features: # * ### Cylinders # * ### Displacement # * ### Horsepower # * ### Weight # * ### Acceleration # ### **Second Layer**: A 25 Neuron layer with a activation of RELU # ### **Third Layer**: A single neuron output layer # #### # ### The model has a Adam optimiser and Mean Squared Error. # ### I trained the model for 100 epochs with a batch size of 2 and learning rate of 0.001. # ### The model achieved a mean squared error of 13.44. import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt # Load Data column_names = [ "mpg", "cylinders", "displacement", "horsepower", "weight", "acceleration", "model year", "origin", "car name", ] dataset = pd.read_csv( "/kaggle/input/auto-mpg-dataset/auto-mpg.data", names=column_names, na_values="?", comment="\t", sep=" ", skipinitialspace=True, ) dataset = dataset.drop(columns=["car name", "model year", "origin"]) dataset = dataset.dropna() dataset.describe().transpose() # Split Data train_data = dataset.sample(frac=0.7, random_state=0) test_data = dataset.drop(train_data.index) train_features = train_data.copy() train_labels = train_features.pop("mpg") test_features = test_data.copy() test_labels = test_features.pop("mpg") print("training: ", train_data.shape, train_features.shape, train_labels.shape) print("testing: ", test_data.shape, test_features.shape, test_labels.shape) # Plotting the Data against MPG fig, axs = plt.subplots(2, 3, figsize=(15, 10)) axs[0, 0].scatter(train_features["acceleration"], train_labels) axs[0, 0].set_xlabel("acceleration") axs[0, 0].set_ylabel("mpg") axs[0, 1].scatter(train_features["cylinders"], train_labels) axs[0, 1].set_xlabel("cylinders") axs[0, 1].set_ylabel("mpg") axs[0, 2].scatter(train_features["displacement"], train_labels) axs[0, 2].set_xlabel("displacement") axs[0, 2].set_ylabel("mpg") axs[1, 0].scatter(train_features["horsepower"], train_labels) axs[1, 0].set_xlabel("horsepower") axs[1, 0].set_ylabel("mpg") axs[1, 1].scatter(train_features["weight"], train_labels) axs[1, 1].set_xlabel("weight") axs[1, 1].set_ylabel("mpg") # Normalizer Layer normalizer = tf.keras.layers.Normalization() normalizer.adapt(np.array(train_features)) print(train_features.describe()) print(pd.DataFrame(normalizer(np.array(train_features)).numpy()).head()) # Actual Model model = tf.keras.models.Sequential( [ normalizer, tf.keras.layers.Dense(units=25, activation="relu"), tf.keras.layers.Dense(units=1), ] ) print(model.summary()) # Model Compilation loss = tf.keras.losses.MeanSquaredError() optim = tf.keras.optimizers.Adam(learning_rate=0.001) model.compile(optimizer=optim, loss=loss) # Model Training batches = 2 epochs = 100 history = model.fit( train_features, train_labels, batch_size=batches, epochs=epochs, validation_split=0.1, verbose=2, ) # Plotting the Loss plt.plot(history.history["loss"], label="loss") plt.plot(history.history["val_loss"], label="val_loss") plt.legend() plt.show() # Evaluation of Model loss = model.evaluate(test_features, test_labels) print("Loss: ", loss) # Predicting Some sample Values columns = ["cylinders", "displacement", "horsepower", "weight", "acceleration"] starting = [] stoping = [] for i in range(len(columns)): starting.append(min(test_data[columns[i]])) stoping.append(max(test_data[columns[i]])) data = 
tf.linspace(start=starting, stop=stoping, num=100) prediction = model.predict(data) fig, axs = plt.subplots(1, 2, figsize=(15, 5)) axs[0].plot(normalizer(np.array(data)).numpy(), label=columns) axs[0].set_ylabel("normalised values") axs[0].legend() axs[1].plot(prediction, label="mpg") axs[1].legend()
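# A minimal optional sketch, assuming the trained `model` above: score one hypothetical car whose five feature values (made up for illustration, not taken from the dataset) follow the training column order cylinders, displacement, horsepower, weight, acceleration.
import numpy as np

# hypothetical feature values, in the same order as train_features.columns
sample_car = np.array([[4.0, 120.0, 90.0, 2500.0, 15.5]])
predicted_mpg = model.predict(sample_car)
print("predicted mpg:", float(predicted_mpg[0][0]))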
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/167/129167896.ipynb
null
null
[{"Id": 129167896, "ScriptId": 35907724, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11469091, "CreationDate": "05/11/2023 13:47:44", "VersionNumber": 1.0, "Title": "MPG Prediction Model", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 128.0, "LinesInsertedFromPrevious": 128.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,327
0
1,327
1,327
129060617
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from xgboost import XGBRegressor from sklearn.metrics import mean_absolute_error import warnings warnings.filterwarnings("ignore") data = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv", index_col="id") test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") y = data["yield"] X = data.drop(["yield"], axis=1) X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=42, test_size=0.2 ) sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) lr = LinearRegression() ridge = Ridge() lasso = Lasso() dt = DecisionTreeRegressor() rt = RandomForestRegressor() xgb = XGBRegressor() models = [ LinearRegression(), Ridge(), Lasso(), DecisionTreeRegressor(), RandomForestRegressor(), XGBRegressor(), ] def model_score(model): model.fit(X_train, y_train) pred = model.predict(X_test) score = mean_absolute_error(y_test, pred) return score def tuning_random(model, param): rs = RandomizedSearchCV( estimator=model, param_distributions=param, n_jobs=-1, cv=5, scoring="neg_mean_absolute_error", ) rs.fit(X_train, y_train) return rs.best_params_ def tuning_grid(model, param): gs = GridSearchCV( estimator=model, param_grid=param, n_jobs=-1, cv=10, scoring="neg_mean_absolute_error", ) gs.fit(X_train, y_train) return gs.best_params_ # # **Linear Regresion** lr.get_params() linear_param_grid = {"fit_intercept": [True, False], "copy_X": [True, False]} tuning_grid(lr, linear_param_grid) linear_error = mean_absolute_error( y_test, LinearRegression(fit_intercept=True, copy_X=True) .fit(X_train, y_train) .predict(X_test), ) # # **Ridge and Lasso** ridge_lasso_params = {"alpha": np.arange(0, 501, 1)} tuning_grid(ridge, ridge_lasso_params) tuning_grid(lasso, ridge_lasso_params) ridge_error = mean_absolute_error( y_test, Ridge(alpha=4).fit(X_train, y_train).predict(X_test) ) lasso_error = mean_absolute_error( y_test, Lasso(alpha=1).fit(X_train, y_train).predict(X_test) ) # # **DecisionTreeRegressor** dt.get_params() dt_params = { "min_samples_leaf": np.arange(1, 21, 1), "min_samples_split": np.arange(1, 21, 1), "criterion": ["squared_error", "absolute_error"], "max_depth": [2, 4, 6, 8, 10, 12], } tuning_random(dt, dt_params) dt_tun = DecisionTreeRegressor( min_samples_split=6, min_samples_leaf=19, max_depth=6, criterion="absolute_error" ) dt_error = mean_absolute_error(y_test, dt_tun.fit(X_train, y_train).predict(X_test)) dt_error # # **Random Forest** rt.get_params() rt_params = { "n_estimators": [100, 300, 500, 1000], "max_depth": [5, 10, 20, 30, None], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], "max_features": ["auto", "sqrt", "log2"], "n_jobs": [-1], } tuning_random(rt, rt_params) rt_error = mean_absolute_error( y_test, RandomForestRegressor( n_jobs=-1, max_features="auto", max_depth=30, min_samples_split=2, min_samples_leaf=4, n_estimators=100, ) .fit(X_train, y_train) .predict(X_test), ) # # **XGB** xgb_param = { "max_depth": [3, 4, 5, 6, 7], "learning_rate": [0.01, 0.05, 0.1, 0.3, 0.5], "subsample": [0.6, 0.7, 0.8, 0.9, 1.0], "colsample_bytree": [0.6, 0.7, 0.8, 0.9, 1.0], "gamma": [0, 0.1, 0.2, 0.3, 
0.4], "n_estimators": [100, 500, 1000, 2000, 5000], "n_jobs": [-1], } tuning_random(xgb, xgb_param) xgb_error = mean_absolute_error( y_test, XGBRegressor( subsample=0.8, n_jobs=-1, n_estimators=1000, max_depth=7, learning_rate=0.01, gamma=0.3, colsample_bytree=1, ) .fit(X_train, y_train) .predict(X_test), ) # # **Gradient Boosting** # grid_params = { "n_estimators": [100, 300, 500], "max_depth": [3, 5, 10, 20, None], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], "max_leaf_nodes": [10, 20, 30, 40, 50], "subsample": [0.6, 0.7, 0.8, 0.9, 1.0], "max_features": [1.0, "sqrt", "log2"], "loss": ["absolute_error"], "learning_rate": [0.01, 0.05, 0.1, 0.3, 0.5], } tuning_random(GradientBoostingRegressor(random_state=10), grid_params) mean_absolute_error( y_test, GradientBoostingRegressor( n_estimators=300, min_samples_split=6, min_samples_leaf=2, subsample=1.0, max_leaf_nodes=30, max_features="auto", loss="absolute_error", max_depth=20, learning_rate=0.05, min_weight_fraction_leaf=0.001, ) .fit(X_train, y_train) .predict(X_test), ) score_dict = { "LinearRegression": linear_error, "Ridge": ridge_error, "Lasso": lasso_error, "Decision Tree": dt_error, "RandomForestRegressor": rt_error, "XGBoost": xgb_error, } pd.DataFrame([score_dict]) dt_sub = DecisionTreeRegressor( min_samples_split=6, min_samples_leaf=13, max_depth=6, criterion="absolute_error" ) # # **Submission** X_test_test = test.drop(["id"], axis=1) X_test_test = sc.fit_transform(X_test_test) gbr_tunn = ( GradientBoostingRegressor( n_estimators=300, min_samples_split=6, min_samples_leaf=2, subsample=1.0, max_leaf_nodes=30, max_features="auto", loss="absolute_error", max_depth=20, learning_rate=0.05, min_weight_fraction_leaf=0.001, ) .fit(X_train, y_train) .predict(X_test_test) ) gbr = GradientBoostingRegressor( n_estimators=100, min_samples_split=7, min_samples_leaf=4, max_features="auto", max_depth=3, learning_rate=0.07, ).fit(X_train, y_train) sub_pred = gbr.predict(X_test_test) submission_dict = {"id": test["id"], "yield": gbr_tunn} sub_df = pd.DataFrame(submission_dict) a = sub_df.to_csv("submission.csv", index=False) sub_df
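# A minimal optional sketch of the usual scikit-learn scaling pattern, assuming the raw `X`, `y` and `test` frames loaded at the top of this notebook: fit the StandardScaler on the training split only and reuse the fitted scaler (transform) for the validation and submission features, instead of calling fit_transform on each split separately.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# re-split from the raw features so the scaler is fit on unscaled training rows only
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42, test_size=0.2)
scaler = StandardScaler()
X_tr_scaled = scaler.fit_transform(X_tr)   # learn mean/std on the training rows
X_val_scaled = scaler.transform(X_val)     # reuse the training statistics
X_submit_scaled = scaler.transform(test.drop(["id"], axis=1))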
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/060/129060617.ipynb
null
null
[{"Id": 129060617, "ScriptId": 38202268, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13198683, "CreationDate": "05/10/2023 16:48:20", "VersionNumber": 1.0, "Title": "Prediction of Wild Blueberry Yield", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 195.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,392
0
2,392
2,392
129017201
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # load the data set df = pd.read_csv("/kaggle/input/titanic/train.csv") # check the first 3 rows of the data df.head(3) # now we can remove it (passenger id column from the data set) df.drop("PassengerId", axis=1, inplace=True) df.head(3) # now we can convert the Embarked column to numeric # since we have 3 options like C, Q and S, we can use the one-hot-encoding # instead of the one hot encoding transformation from pandas we can use get_dummies from sklearn temp = pd.get_dummies(df["Embarked"]) # now we can add the new columns to the original data frame df = pd.concat([df, temp], axis=1) # we dont need the nominal column anymore, so lets remove it df.drop("Embarked", axis=1, inplace=True) df.head(3) # similar to the 'Embarked' column, we transform the Sex column to numeric # this case we have a risk of dummy variable trap # so we simply use the pandas replacer temp = df["Sex"].replace({"male": 1, "female": 0}) # remove the nominal column from data set df.drop("Sex", axis=1, inplace=True) # add transformed data set to the original dataset df = pd.concat([df, temp], axis=1) df.head(3) # lets check if the name column contains any of these titles. temp = ( df["Name"].str.contains("Mr\.") | df["Name"].str.contains("Dr\.") | df["Name"].str.contains("Sir") ) # please note that we use \ symbol before . because it is a regular expression # now lets convert the outcome to numeric temp = temp.to_frame().replace({True: 1, False: 0}) # remove the original name column now, we dont neet it anymore df.drop("Name", axis=1, inplace=True) # concatenate the original data frame with the titles df = pd.concat([df, temp], axis=1) df.head(3) df.drop(["Cabin", "Ticket"], axis=1, inplace=True) df.head(3) # lets implement the min-max scaler and set min = 0 and max = 1 from sklearn.preprocessing import MinMaxScaler mms = MinMaxScaler() df = pd.DataFrame(mms.fit_transform(df), columns=df.columns) # lets check the maximum, avarega values df.describe() # now we can implement the knn imputer and fill the blanks with the average of 5 nearest neighbors from sklearn.impute import KNNImputer knni = KNNImputer() df = pd.DataFrame(knni.fit_transform(df), columns=df.columns) df.head(3) # lets try to build a linear regression between age and survived columns from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(df[["Age"]], df[["Survived"]]) lr.coef_ # or we can create another matrix from the intercept values (which is the b value in y = ax+b formula) regmat = [] for col in df.columns: satirlar = [] for row in df.columns: lr.fit(df[[row]], df[col]) x = lr.intercept_ satirlar += [round(x, 2)] regmat += [satirlar] sonuc = pd.DataFrame(regmat, columns=df.columns, index=df.columns) sonuc # another technique for the feature importance is using a machine learning and getting the # feature importances. 
from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() X = df.drop("Survived", axis=1) y = df["Survived"] rfc.fit(X, y) pred = rfc.predict(X) from sklearn.metrics import accuracy_score acc = accuracy_score(y, pred) acc from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) rfc.fit(X_train, y_train) pred = rfc.predict(X_test) acc = accuracy_score(y_test, pred) acc # lets get cv = 3 so the data set will be divided into 3 parts and 2 of them will be the training set from sklearn.model_selection import cross_validate scores = cross_validate(rfc, X, y, cv=3, scoring=("accuracy"), return_train_score=True) scores # Algorithm importing and definitions from sklearn.linear_model import LogisticRegression lr = LogisticRegression() from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() from sklearn.svm import SVC svc = SVC() from sklearn.cluster import KMeans km = KMeans() from sklearn.ensemble import GradientBoostingClassifier gbc = GradientBoostingClassifier() # we create a list for algorihthms and another list for the name of the algorithms algos = [lr, gnb, knn, dtc, svc, km, gbc, rfc] algo_names = [ "LogisticRegression", "GaussianNB", "KNeighborsClassifier", "DecisionTreeClassifier", "SVC", "KMeans", "GradientBoostingClassifier", "RandomForestClassifier", ] # lets get the score of each algorithm by using the accuracy scores = [] for i in range(len(algos)): temp = cross_validate( algos[i], X, y, cv=3, scoring=("accuracy"), return_train_score=True ) scores += [algo_names[i], temp] scores df = pd.read_csv("/kaggle/input/titanic/train.csv") temp = pd.get_dummies(df["Embarked"]) df = pd.concat([df, temp], axis=1) df.drop("Embarked", axis=1, inplace=True) temp = df["Sex"].replace({"male": 1, "female": 0}) df.drop("Sex", axis=1, inplace=True) df = pd.concat([df, temp], axis=1) temp = ( df["Name"].str.contains("Mr\.") | df["Name"].str.contains("Dr\.") | df["Name"].str.contains("Sir") ) temp = temp.to_frame().replace({True: 1, False: 0}) df.drop("Name", axis=1, inplace=True) df = pd.concat([df, temp], axis=1) df.drop(["Cabin", "Ticket", "PassengerId"], axis=1, inplace=True) from sklearn.preprocessing import MinMaxScaler from sklearn.impute import KNNImputer from sklearn.pipeline import Pipeline pipe = Pipeline( [ ("Min Max Scaler", MinMaxScaler()), ("KNN Imputer", KNNImputer()), ("GradientBoostingClassifier", GradientBoostingClassifier()), ] ) X = df.drop("Survived", axis=1) y = df["Survived"] pipe.fit(X, y) y_pred = pipe.predict(X) from sklearn.metrics import accuracy_score acc = accuracy_score(y, y_pred) acc # load the test data set df = pd.read_csv("/kaggle/input/titanic/test.csv") # check the first 3 rows of the data df.head(3) temp = pd.get_dummies(df["Embarked"]) df = pd.concat([df, temp], axis=1) df.drop("Embarked", axis=1, inplace=True) temp = df["Sex"].replace({"male": 1, "female": 0}) df.drop("Sex", axis=1, inplace=True) df = pd.concat([df, temp], axis=1) temp = ( df["Name"].str.contains("Mr\.") | df["Name"].str.contains("Dr\.") | df["Name"].str.contains("Sir") ) temp = temp.to_frame().replace({True: 1, False: 0}) df.drop("Name", axis=1, inplace=True) df = pd.concat([df, temp], axis=1) df.drop(["Cabin", "Ticket", "PassengerId"], axis=1, inplace=True) sonuclar = pipe.predict(df) df = 
pd.read_csv("/kaggle/input/titanic/test.csv") df = df[["PassengerId"]] df["Survived"] = sonuclar df.to_csv("sonuclar.csv", index=False)
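# A minimal optional sketch, assuming `pipe`, `X` and `y` as built above: because the preprocessing now lives inside the pipeline, the whole pipeline can be cross-validated directly, which gives a less optimistic estimate than scoring on the data it was fitted on.
from sklearn.model_selection import cross_validate

cv_scores = cross_validate(pipe, X, y, cv=5, scoring="accuracy", return_train_score=True)
print("mean train accuracy:", cv_scores["train_score"].mean())
print("mean test accuracy:", cv_scores["test_score"].mean())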
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/017/129017201.ipynb
null
null
[{"Id": 129017201, "ScriptId": 38350646, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15035340, "CreationDate": "05/10/2023 10:42:18", "VersionNumber": 3.0, "Title": "Titanic", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 218.0, "LinesInsertedFromPrevious": 200.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 18.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,361
0
2,361
2,361
129017471
<jupyter_start><jupyter_text>Mobile Price Classification ### Context Bob has started his own mobile company. He wants to give tough fight to big companies like Apple,Samsung etc. He does not know how to estimate price of mobiles his company creates. In this competitive mobile phone market you cannot simply assume things. To solve this problem he collects sales data of mobile phones of various companies. Bob wants to find out some relation between features of a mobile phone(eg:- RAM,Internal Memory etc) and its selling price. But he is not so good at Machine Learning. So he needs your help to solve this problem. In this problem you do not have to predict actual price but a price range indicating how high the price is Kaggle dataset identifier: mobile-price-classification <jupyter_code>import pandas as pd df = pd.read_csv('mobile-price-classification/train.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 2000 entries, 0 to 1999 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 battery_power 2000 non-null int64 1 blue 2000 non-null int64 2 clock_speed 2000 non-null float64 3 dual_sim 2000 non-null int64 4 fc 2000 non-null int64 5 four_g 2000 non-null int64 6 int_memory 2000 non-null int64 7 m_dep 2000 non-null float64 8 mobile_wt 2000 non-null int64 9 n_cores 2000 non-null int64 10 pc 2000 non-null int64 11 px_height 2000 non-null int64 12 px_width 2000 non-null int64 13 ram 2000 non-null int64 14 sc_h 2000 non-null int64 15 sc_w 2000 non-null int64 16 talk_time 2000 non-null int64 17 three_g 2000 non-null int64 18 touch_screen 2000 non-null int64 19 wifi 2000 non-null int64 20 price_range 2000 non-null int64 dtypes: float64(2), int64(19) memory usage: 328.2 KB <jupyter_text>Examples: { "battery_power": 842.0, "blue": 0.0, "clock_speed": 2.2, "dual_sim": 0.0, "fc": 1.0, "four_g": 0.0, "int_memory": 7.0, "m_dep": 0.6000000000000001, "mobile_wt": 188.0, "n_cores": 2.0, "pc": 2.0, "px_height": 20.0, "px_width": 756.0, "ram": 2549.0, "sc_h": 9.0, "sc_w": 7.0, "talk_time": 19.0, "three_g": 0.0, "touch_screen": 0.0, "wifi": 1.0, "...": "and 1 more columns" } { "battery_power": 1021.0, "blue": 1.0, "clock_speed": 0.5, "dual_sim": 1.0, "fc": 0.0, "four_g": 1.0, "int_memory": 53.0, "m_dep": 0.7000000000000001, "mobile_wt": 136.0, "n_cores": 3.0, "pc": 6.0, "px_height": 905.0, "px_width": 1988.0, "ram": 2631.0, "sc_h": 17.0, "sc_w": 3.0, "talk_time": 7.0, "three_g": 1.0, "touch_screen": 1.0, "wifi": 0.0, "...": "and 1 more columns" } { "battery_power": 563.0, "blue": 1.0, "clock_speed": 0.5, "dual_sim": 1.0, "fc": 2.0, "four_g": 1.0, "int_memory": 41.0, "m_dep": 0.9, "mobile_wt": 145.0, "n_cores": 5.0, "pc": 6.0, "px_height": 1263.0, "px_width": 1716.0, "ram": 2603.0, "sc_h": 11.0, "sc_w": 2.0, "talk_time": 9.0, "three_g": 1.0, "touch_screen": 1.0, "wifi": 0.0, "...": "and 1 more columns" } { "battery_power": 615.0, "blue": 1.0, "clock_speed": 2.5, "dual_sim": 0.0, "fc": 0.0, "four_g": 0.0, "int_memory": 10.0, "m_dep": 0.8, "mobile_wt": 131.0, "n_cores": 6.0, "pc": 9.0, "px_height": 1216.0, "px_width": 1786.0, "ram": 2769.0, "sc_h": 16.0, "sc_w": 8.0, "talk_time": 11.0, "three_g": 1.0, "touch_screen": 0.0, "wifi": 0.0, "...": "and 1 more columns" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv("/kaggle/input/mobile-price-classification/train.csv") train train.describe() train.isnull().sum() import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["fc"]) Q1 = train["fc"].quantile(0.25) Q3 = train["fc"].quantile(0.75) IQR = Q3 - Q1 train = train[(train["fc"] >= Q1 - 1.5 * IQR) & (train["fc"] <= Q3 + 1.5 * IQR)] import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["fc"]) train.info() # **Decision tree** X = train.drop("price_range", axis=1) y = train["price_range"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=53 ) from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier(random_state=8, max_depth=3) clf.fit(X_train, y_train) clf.score(X_test, y_test) from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay predictions = clf.predict(X_test) cm = confusion_matrix(y_test, predictions, labels=clf.classes_) disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=clf.classes_) disp.plot() # **Interpret and visualize the model.** from sklearn import tree plt.figure(figsize=(20, 10)) tree.plot_tree(clf) plt.show() clf.feature_importances_ clf.feature_names_in_ # **Random forest** from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() clf.fit(X_train, y_train) clf.score(X_test, y_test) # **Gradient boosting** from sklearn.ensemble import GradientBoostingClassifier clf = GradientBoostingClassifier(max_depth=1, random_state=7).fit(X_train, y_train) clf.score(X_test, y_test) params = { "max_depth": [1], "max_features": range(1, 20), "learning_rate": [0.1, 0.5, 1], } from sklearn.model_selection import RandomizedSearchCV random_search = RandomizedSearchCV(GradientBoostingClassifier(), params, n_iter=10) clf_random = random_search.fit(X_train, y_train) random_search.best_score_, random_search.best_params_
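# A minimal optional sketch, assuming the `random_search` above finished with its default refit=True: the refitted best gradient-boosting model exposes feature_importances_, which can be lined up with the column names of `X`.
import pandas as pd
import matplotlib.pyplot as plt

best_gb = random_search.best_estimator_  # available because refit=True by default
importances = pd.Series(best_gb.feature_importances_, index=X.columns).sort_values()
print(importances.tail(10))  # the ten most informative features
importances.plot(kind="barh", figsize=(8, 6))
plt.show()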
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/017/129017471.ipynb
mobile-price-classification
iabhishekofficial
[{"Id": 129017471, "ScriptId": 38350847, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6692497, "CreationDate": "05/10/2023 10:44:30", "VersionNumber": 1.0, "Title": "Task4_Mobile", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 104.0, "LinesInsertedFromPrevious": 104.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184702142, "KernelVersionId": 129017471, "SourceDatasetVersionId": 15520}]
[{"Id": 15520, "DatasetId": 11167, "DatasourceVersionId": 15520, "CreatorUserId": 907764, "LicenseName": "Unknown", "CreationDate": "01/28/2018 08:44:24", "VersionNumber": 1.0, "Title": "Mobile Price Classification", "Slug": "mobile-price-classification", "Subtitle": "Classify Mobile Price Range", "Description": "### Context\n\nBob has started his own mobile company. He wants to give tough fight to big companies like Apple,Samsung etc.\n\nHe does not know how to estimate price of mobiles his company creates. In this competitive mobile phone market you cannot simply assume things. To solve this problem he collects sales data of mobile phones of various companies.\n\nBob wants to find out some relation between features of a mobile phone(eg:- RAM,Internal Memory etc) and its selling price. But he is not so good at Machine Learning. So he needs your help to solve this problem.\n\nIn this problem you do not have to predict actual price but a price range indicating how high the price is", "VersionNotes": "Initial release", "TotalCompressedBytes": 186253.0, "TotalUncompressedBytes": 186253.0}]
[{"Id": 11167, "CreatorUserId": 907764, "OwnerUserId": 907764.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 15520.0, "CurrentDatasourceVersionId": 15520.0, "ForumId": 18557, "Type": 2, "CreationDate": "01/28/2018 08:44:24", "LastActivityDate": "02/06/2018", "TotalViews": 793378, "TotalDownloads": 143007, "TotalVotes": 1700, "TotalKernels": 3248}]
[{"Id": 907764, "UserName": "iabhishekofficial", "DisplayName": "Abhishek Sharma", "RegisterDate": "02/11/2017", "PerformanceTier": 1}]
[{"mobile-price-classification/train.csv": {"column_names": "[\"battery_power\", \"blue\", \"clock_speed\", \"dual_sim\", \"fc\", \"four_g\", \"int_memory\", \"m_dep\", \"mobile_wt\", \"n_cores\", \"pc\", \"px_height\", \"px_width\", \"ram\", \"sc_h\", \"sc_w\", \"talk_time\", \"three_g\", \"touch_screen\", \"wifi\", \"price_range\"]", "column_data_types": "{\"battery_power\": \"int64\", \"blue\": \"int64\", \"clock_speed\": \"float64\", \"dual_sim\": \"int64\", \"fc\": \"int64\", \"four_g\": \"int64\", \"int_memory\": \"int64\", \"m_dep\": \"float64\", \"mobile_wt\": \"int64\", \"n_cores\": \"int64\", \"pc\": \"int64\", \"px_height\": \"int64\", \"px_width\": \"int64\", \"ram\": \"int64\", \"sc_h\": \"int64\", \"sc_w\": \"int64\", \"talk_time\": \"int64\", \"three_g\": \"int64\", \"touch_screen\": \"int64\", \"wifi\": \"int64\", \"price_range\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2000 entries, 0 to 1999\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 battery_power 2000 non-null int64 \n 1 blue 2000 non-null int64 \n 2 clock_speed 2000 non-null float64\n 3 dual_sim 2000 non-null int64 \n 4 fc 2000 non-null int64 \n 5 four_g 2000 non-null int64 \n 6 int_memory 2000 non-null int64 \n 7 m_dep 2000 non-null float64\n 8 mobile_wt 2000 non-null int64 \n 9 n_cores 2000 non-null int64 \n 10 pc 2000 non-null int64 \n 11 px_height 2000 non-null int64 \n 12 px_width 2000 non-null int64 \n 13 ram 2000 non-null int64 \n 14 sc_h 2000 non-null int64 \n 15 sc_w 2000 non-null int64 \n 16 talk_time 2000 non-null int64 \n 17 three_g 2000 non-null int64 \n 18 touch_screen 2000 non-null int64 \n 19 wifi 2000 non-null int64 \n 20 price_range 2000 non-null int64 \ndtypes: float64(2), int64(19)\nmemory usage: 328.2 KB\n", "summary": "{\"battery_power\": {\"count\": 2000.0, \"mean\": 1238.5185, \"std\": 439.41820608353135, \"min\": 501.0, \"25%\": 851.75, \"50%\": 1226.0, \"75%\": 1615.25, \"max\": 1998.0}, \"blue\": {\"count\": 2000.0, \"mean\": 0.495, \"std\": 0.5001000400170075, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"clock_speed\": {\"count\": 2000.0, \"mean\": 1.52225, \"std\": 0.8160042088950689, \"min\": 0.5, \"25%\": 0.7, \"50%\": 1.5, \"75%\": 2.2, \"max\": 3.0}, \"dual_sim\": {\"count\": 2000.0, \"mean\": 0.5095, \"std\": 0.500034766175005, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"fc\": {\"count\": 2000.0, \"mean\": 4.3095, \"std\": 4.341443747983894, \"min\": 0.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 7.0, \"max\": 19.0}, \"four_g\": {\"count\": 2000.0, \"mean\": 0.5215, \"std\": 0.49966246736236386, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"int_memory\": {\"count\": 2000.0, \"mean\": 32.0465, \"std\": 18.145714955206856, \"min\": 2.0, \"25%\": 16.0, \"50%\": 32.0, \"75%\": 48.0, \"max\": 64.0}, \"m_dep\": {\"count\": 2000.0, \"mean\": 0.50175, \"std\": 0.2884155496235117, \"min\": 0.1, \"25%\": 0.2, \"50%\": 0.5, \"75%\": 0.8, \"max\": 1.0}, \"mobile_wt\": {\"count\": 2000.0, \"mean\": 140.249, \"std\": 35.39965489638835, \"min\": 80.0, \"25%\": 109.0, \"50%\": 141.0, \"75%\": 170.0, \"max\": 200.0}, \"n_cores\": {\"count\": 2000.0, \"mean\": 4.5205, \"std\": 2.2878367180426604, \"min\": 1.0, \"25%\": 3.0, \"50%\": 4.0, \"75%\": 7.0, \"max\": 8.0}, \"pc\": {\"count\": 2000.0, \"mean\": 9.9165, \"std\": 6.06431494134778, \"min\": 0.0, \"25%\": 5.0, \"50%\": 10.0, \"75%\": 15.0, \"max\": 20.0}, \"px_height\": 
{\"count\": 2000.0, \"mean\": 645.108, \"std\": 443.7808108064386, \"min\": 0.0, \"25%\": 282.75, \"50%\": 564.0, \"75%\": 947.25, \"max\": 1960.0}, \"px_width\": {\"count\": 2000.0, \"mean\": 1251.5155, \"std\": 432.19944694633796, \"min\": 500.0, \"25%\": 874.75, \"50%\": 1247.0, \"75%\": 1633.0, \"max\": 1998.0}, \"ram\": {\"count\": 2000.0, \"mean\": 2124.213, \"std\": 1084.7320436099494, \"min\": 256.0, \"25%\": 1207.5, \"50%\": 2146.5, \"75%\": 3064.5, \"max\": 3998.0}, \"sc_h\": {\"count\": 2000.0, \"mean\": 12.3065, \"std\": 4.213245004356306, \"min\": 5.0, \"25%\": 9.0, \"50%\": 12.0, \"75%\": 16.0, \"max\": 19.0}, \"sc_w\": {\"count\": 2000.0, \"mean\": 5.767, \"std\": 4.3563976058264045, \"min\": 0.0, \"25%\": 2.0, \"50%\": 5.0, \"75%\": 9.0, \"max\": 18.0}, \"talk_time\": {\"count\": 2000.0, \"mean\": 11.011, \"std\": 5.463955197766688, \"min\": 2.0, \"25%\": 6.0, \"50%\": 11.0, \"75%\": 16.0, \"max\": 20.0}, \"three_g\": {\"count\": 2000.0, \"mean\": 0.7615, \"std\": 0.42627292231873126, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"touch_screen\": {\"count\": 2000.0, \"mean\": 0.503, \"std\": 0.500116044562674, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"wifi\": {\"count\": 2000.0, \"mean\": 0.507, \"std\": 0.5000760322381083, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"price_range\": {\"count\": 2000.0, \"mean\": 1.5, \"std\": 1.118313602106461, \"min\": 0.0, \"25%\": 0.75, \"50%\": 1.5, \"75%\": 2.25, \"max\": 3.0}}", "examples": "{\"battery_power\":{\"0\":842,\"1\":1021,\"2\":563,\"3\":615},\"blue\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"clock_speed\":{\"0\":2.2,\"1\":0.5,\"2\":0.5,\"3\":2.5},\"dual_sim\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"fc\":{\"0\":1,\"1\":0,\"2\":2,\"3\":0},\"four_g\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"int_memory\":{\"0\":7,\"1\":53,\"2\":41,\"3\":10},\"m_dep\":{\"0\":0.6,\"1\":0.7,\"2\":0.9,\"3\":0.8},\"mobile_wt\":{\"0\":188,\"1\":136,\"2\":145,\"3\":131},\"n_cores\":{\"0\":2,\"1\":3,\"2\":5,\"3\":6},\"pc\":{\"0\":2,\"1\":6,\"2\":6,\"3\":9},\"px_height\":{\"0\":20,\"1\":905,\"2\":1263,\"3\":1216},\"px_width\":{\"0\":756,\"1\":1988,\"2\":1716,\"3\":1786},\"ram\":{\"0\":2549,\"1\":2631,\"2\":2603,\"3\":2769},\"sc_h\":{\"0\":9,\"1\":17,\"2\":11,\"3\":16},\"sc_w\":{\"0\":7,\"1\":3,\"2\":2,\"3\":8},\"talk_time\":{\"0\":19,\"1\":7,\"2\":9,\"3\":11},\"three_g\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"touch_screen\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"wifi\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"price_range\":{\"0\":1,\"1\":2,\"2\":2,\"3\":2}}"}}]
true
1
<start_data_description><data_path>mobile-price-classification/train.csv: <column_names> ['battery_power', 'blue', 'clock_speed', 'dual_sim', 'fc', 'four_g', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time', 'three_g', 'touch_screen', 'wifi', 'price_range'] <column_types> {'battery_power': 'int64', 'blue': 'int64', 'clock_speed': 'float64', 'dual_sim': 'int64', 'fc': 'int64', 'four_g': 'int64', 'int_memory': 'int64', 'm_dep': 'float64', 'mobile_wt': 'int64', 'n_cores': 'int64', 'pc': 'int64', 'px_height': 'int64', 'px_width': 'int64', 'ram': 'int64', 'sc_h': 'int64', 'sc_w': 'int64', 'talk_time': 'int64', 'three_g': 'int64', 'touch_screen': 'int64', 'wifi': 'int64', 'price_range': 'int64'} <dataframe_Summary> {'battery_power': {'count': 2000.0, 'mean': 1238.5185, 'std': 439.41820608353135, 'min': 501.0, '25%': 851.75, '50%': 1226.0, '75%': 1615.25, 'max': 1998.0}, 'blue': {'count': 2000.0, 'mean': 0.495, 'std': 0.5001000400170075, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'clock_speed': {'count': 2000.0, 'mean': 1.52225, 'std': 0.8160042088950689, 'min': 0.5, '25%': 0.7, '50%': 1.5, '75%': 2.2, 'max': 3.0}, 'dual_sim': {'count': 2000.0, 'mean': 0.5095, 'std': 0.500034766175005, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'fc': {'count': 2000.0, 'mean': 4.3095, 'std': 4.341443747983894, 'min': 0.0, '25%': 1.0, '50%': 3.0, '75%': 7.0, 'max': 19.0}, 'four_g': {'count': 2000.0, 'mean': 0.5215, 'std': 0.49966246736236386, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'int_memory': {'count': 2000.0, 'mean': 32.0465, 'std': 18.145714955206856, 'min': 2.0, '25%': 16.0, '50%': 32.0, '75%': 48.0, 'max': 64.0}, 'm_dep': {'count': 2000.0, 'mean': 0.50175, 'std': 0.2884155496235117, 'min': 0.1, '25%': 0.2, '50%': 0.5, '75%': 0.8, 'max': 1.0}, 'mobile_wt': {'count': 2000.0, 'mean': 140.249, 'std': 35.39965489638835, 'min': 80.0, '25%': 109.0, '50%': 141.0, '75%': 170.0, 'max': 200.0}, 'n_cores': {'count': 2000.0, 'mean': 4.5205, 'std': 2.2878367180426604, 'min': 1.0, '25%': 3.0, '50%': 4.0, '75%': 7.0, 'max': 8.0}, 'pc': {'count': 2000.0, 'mean': 9.9165, 'std': 6.06431494134778, 'min': 0.0, '25%': 5.0, '50%': 10.0, '75%': 15.0, 'max': 20.0}, 'px_height': {'count': 2000.0, 'mean': 645.108, 'std': 443.7808108064386, 'min': 0.0, '25%': 282.75, '50%': 564.0, '75%': 947.25, 'max': 1960.0}, 'px_width': {'count': 2000.0, 'mean': 1251.5155, 'std': 432.19944694633796, 'min': 500.0, '25%': 874.75, '50%': 1247.0, '75%': 1633.0, 'max': 1998.0}, 'ram': {'count': 2000.0, 'mean': 2124.213, 'std': 1084.7320436099494, 'min': 256.0, '25%': 1207.5, '50%': 2146.5, '75%': 3064.5, 'max': 3998.0}, 'sc_h': {'count': 2000.0, 'mean': 12.3065, 'std': 4.213245004356306, 'min': 5.0, '25%': 9.0, '50%': 12.0, '75%': 16.0, 'max': 19.0}, 'sc_w': {'count': 2000.0, 'mean': 5.767, 'std': 4.3563976058264045, 'min': 0.0, '25%': 2.0, '50%': 5.0, '75%': 9.0, 'max': 18.0}, 'talk_time': {'count': 2000.0, 'mean': 11.011, 'std': 5.463955197766688, 'min': 2.0, '25%': 6.0, '50%': 11.0, '75%': 16.0, 'max': 20.0}, 'three_g': {'count': 2000.0, 'mean': 0.7615, 'std': 0.42627292231873126, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'touch_screen': {'count': 2000.0, 'mean': 0.503, 'std': 0.500116044562674, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'wifi': {'count': 2000.0, 'mean': 0.507, 'std': 0.5000760322381083, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'price_range': {'count': 2000.0, 
'mean': 1.5, 'std': 1.118313602106461, 'min': 0.0, '25%': 0.75, '50%': 1.5, '75%': 2.25, 'max': 3.0}} <dataframe_info> RangeIndex: 2000 entries, 0 to 1999 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 battery_power 2000 non-null int64 1 blue 2000 non-null int64 2 clock_speed 2000 non-null float64 3 dual_sim 2000 non-null int64 4 fc 2000 non-null int64 5 four_g 2000 non-null int64 6 int_memory 2000 non-null int64 7 m_dep 2000 non-null float64 8 mobile_wt 2000 non-null int64 9 n_cores 2000 non-null int64 10 pc 2000 non-null int64 11 px_height 2000 non-null int64 12 px_width 2000 non-null int64 13 ram 2000 non-null int64 14 sc_h 2000 non-null int64 15 sc_w 2000 non-null int64 16 talk_time 2000 non-null int64 17 three_g 2000 non-null int64 18 touch_screen 2000 non-null int64 19 wifi 2000 non-null int64 20 price_range 2000 non-null int64 dtypes: float64(2), int64(19) memory usage: 328.2 KB <some_examples> {'battery_power': {'0': 842, '1': 1021, '2': 563, '3': 615}, 'blue': {'0': 0, '1': 1, '2': 1, '3': 1}, 'clock_speed': {'0': 2.2, '1': 0.5, '2': 0.5, '3': 2.5}, 'dual_sim': {'0': 0, '1': 1, '2': 1, '3': 0}, 'fc': {'0': 1, '1': 0, '2': 2, '3': 0}, 'four_g': {'0': 0, '1': 1, '2': 1, '3': 0}, 'int_memory': {'0': 7, '1': 53, '2': 41, '3': 10}, 'm_dep': {'0': 0.6, '1': 0.7, '2': 0.9, '3': 0.8}, 'mobile_wt': {'0': 188, '1': 136, '2': 145, '3': 131}, 'n_cores': {'0': 2, '1': 3, '2': 5, '3': 6}, 'pc': {'0': 2, '1': 6, '2': 6, '3': 9}, 'px_height': {'0': 20, '1': 905, '2': 1263, '3': 1216}, 'px_width': {'0': 756, '1': 1988, '2': 1716, '3': 1786}, 'ram': {'0': 2549, '1': 2631, '2': 2603, '3': 2769}, 'sc_h': {'0': 9, '1': 17, '2': 11, '3': 16}, 'sc_w': {'0': 7, '1': 3, '2': 2, '3': 8}, 'talk_time': {'0': 19, '1': 7, '2': 9, '3': 11}, 'three_g': {'0': 0, '1': 1, '2': 1, '3': 1}, 'touch_screen': {'0': 0, '1': 1, '2': 1, '3': 0}, 'wifi': {'0': 1, '1': 0, '2': 0, '3': 0}, 'price_range': {'0': 1, '1': 2, '2': 2, '3': 2}} <end_description>
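# Added sketch (not in the original dump): a quick sanity check of the schema described above
# (2000 rows, 21 numeric columns, no missing values), assuming the same Kaggle input path.
import pandas as pd

train = pd.read_csv("/kaggle/input/mobile-price-classification/train.csv")
assert train.shape == (2000, 21)
assert train.isnull().sum().sum() == 0
print(train.dtypes.value_counts())  # expected: 19 int64 columns and 2 float64 columns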
931
0
2,591
931
129183540
<jupyter_start><jupyter_text>Super Store ## About the data and what to do… A superstore is a very large supermarket, often selling household goods, clothes, and electrical goods, as well as food. Superstores typically charge anywhere from 15 to 45 percent less than their smaller counterparts. As a business manager, try to find out the weak areas where you can work to make more profit. Perform ‘Exploratory Data Analysis’. What all business problems you can derive by exploring the data? Kaggle dataset identifier: super-store <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os df = pd.read_csv("/kaggle/input/super-store/SampleSuperstore.csv") df.head() df.tail() df.columns df.shape print(df.shape) df["Category"].value_counts() df["Sub-Category"].value_counts() df["Segment"].unique() df = df.drop(columns=["Postal Code"], axis=1) print(df.shape) df["Segment"].value_counts() df.head() df["Region"].value_counts() df["Ship Mode"].value_counts() df["State"].value_counts() df["City"].value_counts() df.nunique() df.info() df.describe() df.isnull().sum() for col in df: print(df[col].unique()) df.duplicated().sum() df.drop_duplicates() df.duplicated().sum() df.drop_duplicates() df.corr() types_product = df.groupby(["Category", "Discount"]).count() print(types_product) df.cov() df.iloc[0] df.iloc[:, 0] df.value_counts() plt.figure(figsize=(16, 8)) plt.bar("Sub-Category", "Category", data=df) plt.show() # * In furniture category the frequency of individual items are very low/less. # * In office supplies the frequency of individual items are in medium range. # * In technology the frequency of individual items are in higher range plt.figure(figsize=(16, 8)) plt.bar("Quantity", "Sub-Category", data=df) plt.show() plt.figure(figsize=(12, 12)) df["Sub-Category"].value_counts().plot.pie(autopct="%1.1f%%") plt.show() plt.figure(figsize=(12, 12)) df["Category"].value_counts().plot.pie(autopct="%1.1f%%") plt.show() df.groupby("Sub-Category")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per Sub-Category") plt.rcParams["figure.figsize"] = [10, 8] plt.show() df["Category"].value_counts()[0:5].keys().tolist() plt.figure(figsize=(7, 7)) plt.pie( list(df["Category"].value_counts()), labels=df["Category"].value_counts().keys().tolist(), autopct="%.1f%%", ) plt.show() print(df["State"].value_counts()) plt.figure(figsize=(17, 15)) sns.countplot(x=df["State"]) plt.xticks(rotation=90) plt.show() df.groupby("State")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per State") plt.rcParams["figure.figsize"] = [15, 10] plt.show() df.groupby("Region")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per Region") plt.rcParams["figure.figsize"] = [15, 10] plt.show() df.groupby("Category")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per State") plt.rcParams["figure.figsize"] = [15, 10] plt.show() print(df["Sub-Category"].value_counts()) plt.figure(figsize=(17, 15)) sns.countplot(x=df["Sub-Category"]) plt.xticks(rotation=90) plt.show() print(df["Region"].value_counts()) plt.figure(figsize=(10, 8)) sns.countplot(x=df["Region"]) plt.xticks(rotation=90) plt.show() fig, axes = plt.subplots(1, 1, figsize=(9, 6)) sns.heatmap(df.corr(), annot=True) plt.show() fig, axes = plt.subplots(1, 1, figsize=(9, 6)) sns.heatmap(df.cov(), annot=True) plt.show() sns.countplot(x=df["Segment"]) sns.countplot(x=df["Region"]) plt.figure(figsize=(17, 
15)) sns.barplot(x=df["Sub-Category"], y=df["Profit"]) plt.figure(figsize=(10, 4)) sns.lineplot(x="Discount", y="Profit", data=df, color="red", label="Discount") plt.show() df.hist(bins=40, figsize=(20, 20)) plt.show() figsize = (20, 20) sns.pairplot(df, hue="Sub-Category") figsize = (20, 20) sns.pairplot(df, hue="Region") figsize = (20, 20) sns.pairplot(df, hue="Category") figsize = (20, 20) sns.pairplot(df, hue="State") grouped = pd.DataFrame( df.groupby(["Ship Mode", "Segment", "Category", "Sub-Category", "State", "Region"])[ "Quantity", "Discount", "Sales", "Profit" ] .sum() .reset_index() ) grouped df.groupby("State").Profit.agg( ["sum", "mean", "min", "max", "count", "median", "std", "var"] ) sns.pairplot(df) fig, axes = plt.subplots(figsize=(20, 20)) sns.boxplot(df["Profit"]) fig, ax = plt.subplots(figsize=(20, 20)) ax.scatter(df["Sales"], df["Profit"]) ax.set_xlabel("Sales") ax.set_ylabel("Profit") plt.show() print(df["Sales"].describe()) plt.figure(figsize=(10, 8)) plt.grid() sns.distplot(df["Sales"], color="b", bins=50, hist_kws={"alpha": 0.5})
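# Added note (not in the original notebook): in current pandas, selecting several columns
# after a groupby requires a list, i.e. double brackets; the tuple form used above, such as
# df.groupby("Sub-Category")["Profit", "Sales"], was deprecated and later removed. A minimal
# sketch of the list-based form, assuming SampleSuperstore.csv is loaded as df as in the cells above:
import matplotlib.pyplot as plt

profit_sales = df.groupby("Sub-Category")[["Profit", "Sales"]].sum()
profit_sales.plot.bar(figsize=(10, 8), title="Total Profit and Sales per Sub-Category")
plt.show()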
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/183/129183540.ipynb
super-store
itssuru
[{"Id": 129183540, "ScriptId": 38397213, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13664433, "CreationDate": "05/11/2023 15:55:30", "VersionNumber": 1.0, "Title": "notebook09d58cf980", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 207.0, "LinesInsertedFromPrevious": 207.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185005074, "KernelVersionId": 129183540, "SourceDatasetVersionId": 2002654}]
[{"Id": 2002654, "DatasetId": 1198133, "DatasourceVersionId": 2042059, "CreatorUserId": 6640507, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "03/07/2021 16:53:10", "VersionNumber": 1.0, "Title": "Super Store", "Slug": "super-store", "Subtitle": "find out the weak areas where you can work to make more profit", "Description": "## About the data and what to do\u2026\n\nA superstore is a very large supermarket, often selling household goods, clothes, and electrical goods, as well as food. Superstores typically charge anywhere from 15 to 45 percent less than their smaller counterparts.\nAs a business manager, try to find out the weak areas where you can work to make more profit. Perform \u2018Exploratory Data Analysis\u2019.\nWhat all business problems you can derive by exploring the data?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1198133, "CreatorUserId": 6640507, "OwnerUserId": 6640507.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2002654.0, "CurrentDatasourceVersionId": 2042059.0, "ForumId": 1216046, "Type": 2, "CreationDate": "03/07/2021 16:53:10", "LastActivityDate": "03/07/2021", "TotalViews": 24088, "TotalDownloads": 3770, "TotalVotes": 39, "TotalKernels": 12}]
[{"Id": 6640507, "UserName": "itssuru", "DisplayName": "ItsSuru", "RegisterDate": "02/02/2021", "PerformanceTier": 2}]
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os df = pd.read_csv("/kaggle/input/super-store/SampleSuperstore.csv") df.head() df.tail() df.columns df.shape print(df.shape) df["Category"].value_counts() df["Sub-Category"].value_counts() df["Segment"].unique() df = df.drop(columns=["Postal Code"], axis=1) print(df.shape) df["Segment"].value_counts() df.head() df["Region"].value_counts() df["Ship Mode"].value_counts() df["State"].value_counts() df["City"].value_counts() df.nunique() df.info() df.describe() df.isnull().sum() for col in df: print(df[col].unique()) df.duplicated().sum() df.drop_duplicates() df.duplicated().sum() df.drop_duplicates() df.corr() types_product = df.groupby(["Category", "Discount"]).count() print(types_product) df.cov() df.iloc[0] df.iloc[:, 0] df.value_counts() plt.figure(figsize=(16, 8)) plt.bar("Sub-Category", "Category", data=df) plt.show() # * In furniture category the frequency of individual items are very low/less. # * In office supplies the frequency of individual items are in medium range. # * In technology the frequency of individual items are in higher range plt.figure(figsize=(16, 8)) plt.bar("Quantity", "Sub-Category", data=df) plt.show() plt.figure(figsize=(12, 12)) df["Sub-Category"].value_counts().plot.pie(autopct="%1.1f%%") plt.show() plt.figure(figsize=(12, 12)) df["Category"].value_counts().plot.pie(autopct="%1.1f%%") plt.show() df.groupby("Sub-Category")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per Sub-Category") plt.rcParams["figure.figsize"] = [10, 8] plt.show() df["Category"].value_counts()[0:5].keys().tolist() plt.figure(figsize=(7, 7)) plt.pie( list(df["Category"].value_counts()), labels=df["Category"].value_counts().keys().tolist(), autopct="%.1f%%", ) plt.show() print(df["State"].value_counts()) plt.figure(figsize=(17, 15)) sns.countplot(x=df["State"]) plt.xticks(rotation=90) plt.show() df.groupby("State")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per State") plt.rcParams["figure.figsize"] = [15, 10] plt.show() df.groupby("Region")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per Region") plt.rcParams["figure.figsize"] = [15, 10] plt.show() df.groupby("Category")["Profit", "Sales"].agg(["sum"]).plot.bar() plt.title("Total Profit Booked on Sale per State") plt.rcParams["figure.figsize"] = [15, 10] plt.show() print(df["Sub-Category"].value_counts()) plt.figure(figsize=(17, 15)) sns.countplot(x=df["Sub-Category"]) plt.xticks(rotation=90) plt.show() print(df["Region"].value_counts()) plt.figure(figsize=(10, 8)) sns.countplot(x=df["Region"]) plt.xticks(rotation=90) plt.show() fig, axes = plt.subplots(1, 1, figsize=(9, 6)) sns.heatmap(df.corr(), annot=True) plt.show() fig, axes = plt.subplots(1, 1, figsize=(9, 6)) sns.heatmap(df.cov(), annot=True) plt.show() sns.countplot(x=df["Segment"]) sns.countplot(x=df["Region"]) plt.figure(figsize=(17, 15)) sns.barplot(x=df["Sub-Category"], y=df["Profit"]) plt.figure(figsize=(10, 4)) sns.lineplot(x="Discount", y="Profit", data=df, color="red", label="Discount") plt.show() df.hist(bins=40, figsize=(20, 20)) plt.show() figsize = (20, 20) sns.pairplot(df, hue="Sub-Category") figsize = (20, 20) sns.pairplot(df, hue="Region") figsize = (20, 20) sns.pairplot(df, hue="Category") figsize = (20, 20) sns.pairplot(df, hue="State") grouped = pd.DataFrame( df.groupby(["Ship Mode", "Segment", "Category", "Sub-Category", "State", "Region"])[ "Quantity", 
"Discount", "Sales", "Profit" ] .sum() .reset_index() ) grouped df.groupby("State").Profit.agg( ["sum", "mean", "min", "max", "count", "median", "std", "var"] ) sns.pairplot(df) fig, axes = plt.subplots(figsize=(20, 20)) sns.boxplot(df["Profit"]) fig, ax = plt.subplots(figsize=(20, 20)) ax.scatter(df["Sales"], df["Profit"]) ax.set_xlabel("Sales") ax.set_ylabel("Profit") plt.show() print(df["Sales"].describe()) plt.figure(figsize=(10, 8)) plt.grid() sns.distplot(df["Sales"], color="b", bins=50, hist_kws={"alpha": 0.5})
false
1
1,534
0
1,661
1,534
129183978
<jupyter_start><jupyter_text>Car Price Prediction Challenge ## Assignment Your notebooks must contain the following steps: - Perform data cleaning and pre-processing. - What steps did you use in this process and how did you clean your data. - Perform exploratory data analysis on the given dataset. - Explain each and every graphs that you make. - Train a ml-model and evaluate it using different metrics. - Why did you choose that particular model? What was the accuracy? - Hyperparameter optimization and feature selection is a plus. - Model deployment and use of ml-flow is a plus. - Perform model interpretation and show feature importance for your model. - Provide some explanation for the above point. - Future steps. Note: try to have your notebooks as presentable as possible. ## Dataset Description CSV file - 19237 rows x 18 columns (Includes Price Columns as Target) ## Attributes ID Price: price of the care(Target Column) Levy Manufacturer Model Prod. year Category Leather interior Fuel type Engine volume Mileage Cylinders Gear box type Drive wheels Doors Wheel Color Airbags Confused or have any doubts in the data column values? Check the dataset discussion tab! Kaggle dataset identifier: car-price-prediction-challenge <jupyter_code>import pandas as pd df = pd.read_csv('car-price-prediction-challenge/car_price_prediction.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 19237 entries, 0 to 19236 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 19237 non-null int64 1 Price 19237 non-null int64 2 Levy 19237 non-null object 3 Manufacturer 19237 non-null object 4 Model 19237 non-null object 5 Prod. year 19237 non-null int64 6 Category 19237 non-null object 7 Leather interior 19237 non-null object 8 Fuel type 19237 non-null object 9 Engine volume 19237 non-null object 10 Mileage 19237 non-null object 11 Cylinders 19237 non-null float64 12 Gear box type 19237 non-null object 13 Drive wheels 19237 non-null object 14 Doors 19237 non-null object 15 Wheel 19237 non-null object 16 Color 19237 non-null object 17 Airbags 19237 non-null int64 dtypes: float64(1), int64(4), object(13) memory usage: 2.6+ MB <jupyter_text>Examples: { "ID": 45654403, "Price": 13328, "Levy": "1399", "Manufacturer": "LEXUS", "Model": "RX 450", "Prod. year": 2010, "Category": "Jeep", "Leather interior": "Yes", "Fuel type": "Hybrid", "Engine volume": 3.5, "Mileage": "186005 km", "Cylinders": 6, "Gear box type": "Automatic", "Drive wheels": "4x4", "Doors": "04-May", "Wheel": "Left wheel", "Color": "Silver", "Airbags": 12 } { "ID": 44731507, "Price": 16621, "Levy": "1018", "Manufacturer": "CHEVROLET", "Model": "Equinox", "Prod. year": 2011, "Category": "Jeep", "Leather interior": "No", "Fuel type": "Petrol", "Engine volume": 3.0, "Mileage": "192000 km", "Cylinders": 6, "Gear box type": "Tiptronic", "Drive wheels": "4x4", "Doors": "04-May", "Wheel": "Left wheel", "Color": "Black", "Airbags": 8 } { "ID": 45774419, "Price": 8467, "Levy": "-", "Manufacturer": "HONDA", "Model": "FIT", "Prod. year": 2006, "Category": "Hatchback", "Leather interior": "No", "Fuel type": "Petrol", "Engine volume": 1.3, "Mileage": "200000 km", "Cylinders": 4, "Gear box type": "Variator", "Drive wheels": "Front", "Doors": "04-May", "Wheel": "Right-hand drive", "Color": "Black", "Airbags": 2 } { "ID": 45769185, "Price": 3607, "Levy": "862", "Manufacturer": "FORD", "Model": "Escape", "Prod. 
year": 2011, "Category": "Jeep", "Leather interior": "Yes", "Fuel type": "Hybrid", "Engine volume": 2.5, "Mileage": "168966 km", "Cylinders": 4, "Gear box type": "Automatic", "Drive wheels": "4x4", "Doors": "04-May", "Wheel": "Left wheel", "Color": "White", "Airbags": 0 } <jupyter_script># ## Libraries import numpy as np import scipy.stats as st import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px import missingno import datetime as dt import category_encoders as ce from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import OrdinalEncoder from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression from xgboost import XGBRegressor from sklearn.metrics import ( mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, ) from sklearn.model_selection import cross_val_score from sklearn.metrics import r2_score from sklearn.model_selection import GridSearchCV # ### ***Helper funcation tom detect outliers*** def find_outliers_IQR(df): q1 = df.quantile(0.25) q3 = df.quantile(0.75) IQR = q3 - q1 outliers = df[(df < (q1 - 1.5 * IQR)) | (df > (q3 + 1.5 * IQR))] return outliers # # Reading and Understanding the data sns.set_style("darkgrid") df = pd.read_csv( "/kaggle/input/car-price-prediction-challenge/car_price_prediction.csv" ) df.head() df.shape df.info() df.rename(columns={"Prod. year": "year"}, inplace=True) df.head() plt.figure(figsize=(8, 8)) sns.countplot(x=df.dtypes) plt.title("Datatypes of columns in the car dataset") plt.show() print("Count of the datatypes of columns") print(df.dtypes.value_counts()) df.isnull().sum() # checking whether there are null values in the dataset missingno.bar(df) # # Data Preprocessing: df.columns = df.columns.str.lower().str.replace(" ", "_") df.head() df.shape df.describe() df.duplicated().sum() # to find the number of duplicate rows df.loc[df.duplicated()] df.drop_duplicates(inplace=True) df.drop("id", axis=1, inplace=True) print(f"This DataSet Contains {df.shape[0]} rows & {df.shape[1]} columns.") df.shape df.info() # ## Columns in the dataset: df.columns # ### Doors df["doors"] df["doors"].value_counts() # replacing the dates with the number of doors present in the car df["doors"] = df["doors"].str.replace("04-May", "4-5") df["doors"] = df["doors"].str.replace("02-Mar", "2-3") df["doors"].value_counts() # ### Levy df["levy"] # replace '-' with '0' df["levy"] = df["levy"].replace(["-"], ["0"]) df["levy"] = df["levy"].astype(str).astype(int) df["levy"] # ### Engine Volume df["engine_volume"] df["engine_volume"].unique() df["engine_volume"].value_counts() # Remove the string 'Turbo'. # Convert the datatype of the column from object to float. df["turbo"] = df["engine_volume"].str.contains("Turbo").map({False: 0, True: 1}) df["engine_volume"] = df["engine_volume"].str.replace("Turbo", "") df["engine_volume"] = df["engine_volume"].astype(str).astype(float) df["engine_volume"] df["turbo"] # ### Mileage df["mileage"] # Remove the string 'KM'. # Convert it to 'int' datatype. 
df["mileage"] = df["mileage"].str.replace("km", "") df["mileage"] = df["mileage"].astype(str).astype(int) df["mileage"] # ### Price df["price"] sns.boxplot(df["price"]) price_outliers = find_outliers_IQR(df["price"]) df.drop(price_outliers.index, inplace=True) sns.kdeplot(df["price"]) sns.boxplot(df["price"]) med = df["price"].median() df.loc[find_outliers_IQR(df["price"]).index, "price"] = med df["price"] # ## Exploratory Data Analysis: # ### Univariate Analysis: # #### Levy sns.boxplot(df["levy"]) plt.title("Box Plot before median imputation") plt.show() med = df["levy"].median() df.loc[find_outliers_IQR(df["levy"]).index, "levy"] = med df["levy"] sns.boxplot(df["levy"]) plt.title("Box Plot after median imputation") plt.show() df.drop(find_outliers_IQR(df["levy"]).index, inplace=True) # #### engine_volume df["engine_volume"].value_counts().sort_values(ascending=False).head(10) plt.figure(figsize=(19, 10)) c1 = sns.countplot(x="engine_volume", data=df, palette="mako") plt.xticks(rotation=45) c1.bar_label(c1.containers[0], size=10) plt.show() # * Majority of the cars produced has a 2L engine volume. sns.kdeplot(data=df, x="engine_volume", fill=True) plt.axvline(np.mean(df["engine_volume"]), linestyle="--", c="r", label="Mean") plt.axvline(np.median(df["engine_volume"]), linestyle="--", c="g", label="Median") plt.legend() plt.show() # * The values present in the Engine volume column are positively skewed. "mean sensitive to outlier" sns.boxplot(x=df["engine_volume"]) plt.title("Box Plot before median imputation") plt.show() engine_volume_outliers = find_outliers_IQR(df["engine_volume"]) med = df["engine_volume"].median() df.loc[engine_volume_outliers.index, "engine_volume"] = med sns.boxplot(x=df["engine_volume"]) plt.title("Box Plot after median imputation") plt.show() # #### Turbo turbo_values = df["turbo"].value_counts() turbo_values explode = [0.2, 0] location = ["YES", "No"] plt.pie( turbo_values, labels=location, colors=sns.color_palette("mako"), autopct="%.0f%%", explode=explode, shadow=True, rotatelabels="true", ) plt.show() # #### Mileage df.mileage df.mileage.describe() Mileage = df.mileage.value_counts().head(20) Mileage plt.figure(figsize=(15, 6)) plt.title("Total Number of Cars Mileage(in KM)", fontsize=15) c1 = sns.barplot(x=Mileage.index, y=Mileage, palette="mako") c1.set( xlabel="Mileage", ylabel="Number of Cars", ) plt.show() sns.boxplot(x=df.mileage) plt.show() sns.boxplot(x=df["mileage"]) plt.title("Box Plot before median imputation") plt.show() mileage_outliers = find_outliers_IQR(df["mileage"]) med = df["mileage"].median() df.loc[mileage_outliers.index, "mileage"] = med sns.boxplot(x=df["mileage"]) plt.title("Box Plot after median imputation") plt.show() df.drop(find_outliers_IQR(df["mileage"]).index, inplace=True) sns.boxplot(x=df["mileage"]) plt.title("Box Plot after drop") plt.show() # #### Cylinders df["cylinders"] df["cylinders"].value_counts() plt.figure(figsize=(15, 6)) plt.title("Count of Cars Cylinders", fontsize=15) c1 = sns.countplot(x="cylinders", data=df, palette="mako") c1.set(xlabel="Cylinders", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=10) plt.show() # * cllinders of 4 is the most frequency # #### Airbags df["airbags"] df["airbags"].unique() df["airbags"].value_counts() plt.title("Count of Cars AirBags", fontsize=15) c1 = sns.countplot(x="airbags", data=df, palette="mako") c1.set(xlabel="Air bags", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=10) plt.show() sns.boxplot(df["airbags"]) # ### Categorical Features 
print("Categorical Variables:") print(df.select_dtypes("object").columns.tolist()) # #### manufacturer df["manufacturer"] df["manufacturer"].unique() Manufacture = df["manufacturer"].value_counts().head(20) plt.figure(figsize=(10, 6)) plt.tight_layout(pad=3) plt.title("Cars Produced by the Manufacturer", fontsize=15) sns.set_style("darkgrid") a = sns.barplot(x=Manufacture.index, y=Manufacture, palette="mako") a.set_xticklabels(Manufacture.index, rotation=90) a.set(xlabel="Manufacturer", ylabel="Number of Cars") plt.show() ax2 = px.treemap( df, path=["manufacturer"], title="Popularity of the Popular Manufacturer:" ) ax2.show() # #### Model Model = df["model"].value_counts().head(20) Model plt.title("Models Produced", fontsize=15) plt.tight_layout(w_pad=15) a = sns.barplot(x=Model.index, y=Model, palette="mako") a.set_xticklabels(Model.index, rotation=90) a.set(xlabel="Model", ylabel="Number of Cars") a.bar_label(a.containers[0], size=10) plt.show() # #### Category df["category"] df["category"].unique() Category = df["category"].value_counts() Category plt.title("Categories of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.barplot(x=Category.index, y=Category, palette="mako") ax.set_xticklabels(Category.index, rotation=45) ax.set(xlabel="Category", ylabel="Number of Cars") ax.bar_label(ax.containers[0]) plt.show() # #### Leather interior df["leather_interior"].value_counts() plt.title("Leather Interior of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="leather_interior", palette="mako") ax.set(ylabel="Number of Cars") ax.bar_label(ax.containers[0]) plt.show() leather_Interior = ["Yes", "No"] plt.figure(figsize=(6, 5)) plt.title("Leather Interiored or Not(Yes/No)") plt.pie( df["leather_interior"].value_counts(), autopct="%1.2f%%", labels=leather_Interior, explode=(0.1, 0.1), ) plt.show() fig, axes = plt.subplots(1, 2, figsize=(12, 6)) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="leather_interior", palette="mako", ax=axes[0]) ax.set(ylabel="Number of Cars") ax.bar_label(ax.containers[0]) ax.title.set_text("Leather Interior of Cars") leather_Interior = ["Yes", "No"] plt.title("Leather Interiored or Not(Yes/No)") axes[1].pie( df["leather_interior"].value_counts(), autopct="%1.2f%%", labels=leather_Interior, explode=(0.1, 0.1), ) plt.show() # #### Fuel Type fuel_type = df["fuel_type"].value_counts() fuel_type plt.title("Fuel type of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="fuel_type", palette="mako") ax.set(xlabel="Fuel Typey", label="Number of Cars") ax.bar_label(ax.containers[0]) ax.set_xticklabels(fuel_type.index, rotation=45) plt.show() df["drive_wheels"].value_counts() labels = df["drive_wheels"].unique().tolist() sizes = df["drive_wheels"].value_counts().tolist() colors = ["lightcyan", "cyan", "royalblue", "darkblue"] colors = ["#66b3ff", "#205bc0", "#94b5ef"] fig = px.pie( df, values=sizes, names=labels, title="Percentage of the Types of Drive Wheels present in Cars", color_discrete_sequence=colors, hole=0.3, ) fig.update_traces(textposition="inside", textinfo="percent+label") fig.update_layout(margin=dict(b=0, l=0, r=0)) fig.show() # #### Doors plt.title("No of Doors in Cars", fontsize=15) doors = df["doors"].value_counts() c1 = sns.countplot(data=df, x="doors", palette="mako") c1.set(xlabel="Doors", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=12) plt.show() # #### Wheels df["wheel"].value_counts() labels = df["wheel"].unique().tolist() sizes = df["wheel"].value_counts().tolist() fig = px.pie( df, 
values=sizes, names=labels, title="Percentage of the Types of Wheels present in Cars", color_discrete_sequence=px.colors.sequential.Blues_r, ) fig.update_traces(textposition="inside", textinfo="percent+label") fig.update_layout(margin=dict(b=0, l=0, r=0)) fig.show() # #### Gear box type df["gear_box_type"].value_counts() plt.title("Gear box type", fontsize=15) c1 = sns.countplot(x=df["gear_box_type"], palette="mako") c1.bar_label(c1.containers[0], size=12) c1.set(xlabel="Gear Box Type", ylabel="No of cars") plt.show() # * Most of the models have a Automatic kind of gear box. # ## Multivariate Analysis plt.figure(figsize=(14, 10)) plot = sns.heatmap( abs(df.select_dtypes("number").corr()), annot=True, cmap="mako", vmin=-1 ) plt.title( "Correlation Heatmap of Car price prediction Dataset", weight="bold", fontsize=15 ) plot.set_xticklabels(plot.get_xticklabels(), rotation=0, horizontalalignment="center") plot.set_yticklabels(plot.get_yticklabels(), rotation=0, horizontalalignment="right") plt.show() # * There is no strong correlation among the variables . df[["price", "cylinders"]].corr() Target_corr = ( df.select_dtypes("number") .corr() .loc[:, "price"] .to_frame() .sort_values(by="price", ascending=False) ) plot = sns.heatmap(Target_corr, annot=True, cmap="mako", vmin=-1) plt.title("Correlation Heatmap of Car price prediction Dataset", weight="bold") plot.set_xticklabels(plot.get_xticklabels(), rotation=0, horizontalalignment="center") plot.set_yticklabels(plot.get_yticklabels(), rotation=0, horizontalalignment="right") plt.show() df.groupby("manufacturer")["mileage"].max().sort_values(ascending=False) df.groupby("model")["mileage"].max().sort_values(ascending=False) # ### Year wise variety of Cars: df_wise_year = ( df.groupby(by=["year"], as_index=False) .count() .sort_values(by="year", ascending=False)[["year", "price"]] ) df_wise_year = df_wise_year.rename(columns={"price": "count"}) df_wise_year["prd_yr_per"] = round( df_wise_year["count"] / sum(df_wise_year["count"]) * 100, 2 ) df_wise_year plt.figure(figsize=(18, 8)) plot = sns.barplot(x="year", y="count", data=df_wise_year, palette="Blues") plt.title("Year wise variety of cars", fontsize=16) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plot.bar_label(container=plot.containers[0]) plt.xlabel("Year of production", fontsize=12) plt.ylabel("Number of car varieties", fontsize=12) plt.show() # * The quantity of car variants each year gradualy increases upto 2015 after which we get to witness a decline. # ### Manufacturer's variety of cars: df_wise_manu = ( df.groupby(by=["manufacturer"], as_index=False) .count() .sort_values(by="price", ascending=False)[["manufacturer", "price"]] ) df_wise_manu = df_wise_manu.rename(columns={"price": "count"}) df_wise_manu plot = sns.barplot( x="manufacturer", y="count", data=df_wise_manu.head(20), palette="Blues_r" ) plt.title("manufacturer wise variety of cars", fontsize=16) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plt.xlabel("manufacturer of production", fontsize=12) plt.ylabel("Number of car varieties", fontsize=12) plt.show() # * Hyundai , Toyota and Mercedes-Benz have the highest variants of cars. 
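# Added note (not in the original notebook): the year-wise and manufacturer-wise counts built
# above with groupby(...).count() can also be obtained directly with value_counts(); a minimal
# sketch, assuming df is the cleaned car-price DataFrame from the cells above:
year_counts = df["year"].value_counts().sort_index()  # number of cars per production year
manufacturer_counts = df["manufacturer"].value_counts()  # number of cars per manufacturer
print(year_counts.tail())
print(manufacturer_counts.head(10))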
# ### Mean Price wise analysis of each feature lcv = df.select_dtypes("object") for column in lcv: print(column) print( df[["price", column]] .groupby(by=[column], as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) lcv.drop(columns=["manufacturer", "model"], inplace=True) def annot_percent(axes): for p in plot.patches: total = sum(p.get_height() for p in plot.patches) / 100 percent = round((p.get_height() / total), 2) x = p.get_x() + p.get_width() / 2 y = p.get_height() * 1.015 plot.annotate(f"{percent}%", (x, y), ha="center", va="bottom") plt.figure(figsize=(15, 20)) for n, column in enumerate(lcv): plot = plt.subplot(4, 2, n + 1) c1 = sns.barplot(x=df[column], y=df["price"], palette="Blues_r", errorbar=None) plot.set_xticklabels( plot.get_xticklabels(), rotation=30, horizontalalignment="center" ) plt.title(f"{column.title()}", weight="bold") plt.tight_layout() # Add annotations to the bar plot annot_percent(plot) # * The average price of Diesel car is maximum and CNG is the minimum. # * The average price of 4x4,Front and Rear drives wheels are almost similar. # * 2-3 Doors has the highest average maximum price. # * Left wheel drive has the maximum average price. df_price_wise_manu = ( df[["price", "manufacturer"]] .groupby(by="manufacturer", as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) df_price_wise_manu c1 = sns.barplot( data=df_price_wise_manu.head(20), x="manufacturer", y="price", palette="Blues_r" ) c1.set_xticklabels(c1.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("manufacturer", weight="bold") plt.tight_layout() df_price_wise_model = ( df[["price", "model"]] .groupby(by="model", as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) df_price_wise_model c1 = sns.barplot( data=df_price_wise_model.head(20), x="model", y="price", palette="Blues_r" ) c1.set_xticklabels(c1.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("model", weight="bold") plt.tight_layout() # ### Average Production Price for each year: price_wise_years = ( df.groupby("year")["price"].agg("mean").sort_values(ascending=False).to_frame() ) price_wise_years.head(10) plt.figure(figsize=(18, 8)) plot = sns.barplot(x="year", y="price", data=df, palette="Blues", errorbar=None) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("Average price for each production year", fontsize=16) plt.xlabel("Production year", fontsize=15) plt.ylabel("Mean Price", fontsize=15) plt.show() px.bar(data_frame=df, x="year", y="price") # # * From the given plot we can infer that there seems to be general increase in the average price of the car each year. # # px.line(data_frame=df,x='year',y='price') sns.lineplot(data=df, x="year", y="price") # ### Comparison of year wise mean price and individual features plt.figure(figsize=(18, 5)) for n, column in enumerate(lcv): sns.lineplot(data=df, x="year", y="price", hue=column, errorbar=None) plt.title( f"Year wise variety mean price of cars vs {column}", fontsize=16, weight="bold" ) plt.xlabel("Year of production", fontsize=12) plt.ylabel("Mean price", fontsize=12) plt.show() # #### From the above plots we can infer that: # * The average price of tiptronic engines have risen due to a surge in demand over the last few years. # * Leather interior's average price is always high. # * There is also an increase in the average price of diesel engines. 
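# Added note (not in the original notebook): the per-feature mean prices printed in the loop
# above can also be collected into one dictionary of sorted tables; a minimal sketch, assuming
# df and the low-cardinality categorical frame lcv from the cells above:
mean_price_by_feature = {
    col: df.groupby(col)["price"].mean().sort_values(ascending=False) for col in lcv.columns
}
print(mean_price_by_feature["fuel_type"])  # e.g. mean price per fuel type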
df.columns # ## Feature Engineering # df_encoding = df.copy() df_encoding.shape targetEncod = ce.LeaveOneOutEncoder() ordinalEncoder = OrdinalEncoder() hotEncoder = OneHotEncoder(sparse_output=False, drop="first") # # * I am using a leave-one-out target encoder for categorical features that have more than 15 unique values # * for features with fewer than 15 unique values I show two options, OrdinalEncoder and OneHotEncoder # * OrdinalEncoder gave me better accuracy than OneHotEncoder # * but I leave the OneHotEncoder example for anyone who wants it # df.groupby("manufacturer").mean()["price"] # ### manufacturer df_encoding["manufacturer"] = targetEncod.fit_transform(df["manufacturer"], df["price"]) df_encoding # ### Model df_encoding["model"] = targetEncod.fit_transform(df["model"], df["price"]) df_encoding.head() # ### category df_encoding["category"].unique() ## Ordinal Encoder category_encoded = ordinalEncoder.fit_transform(df_encoding[["category"]]) category_encoded df_encoding["category"] = category_encoded # # > OneHot Encoder # # category_df = pd.DataFrame(category_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['category'].index) # category_df.head() # df_encoding=pd.concat([df_encoding.drop('category', axis=1), category_df], axis=1) # df_encoding.head() # ### leather_interior,doors df_encoding["leather_interior"] = df["leather_interior"].map({"Yes": 1, "No": 0}) df_encoding["doors"] = df["doors"].map({"4-5": 4, "2-3": 2, ">5": 5}) df_encoding.head() # ### fuel_type df["fuel_type"].unique() fuel_type_encoded = ordinalEncoder.fit_transform(df[["fuel_type"]]) fuel_type_encoded df_encoding["fuel_type"] = fuel_type_encoded # # > OneHot Encoder # # fuel_type_df = pd.DataFrame(fuel_type_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['fuel_type'].index) # fuel_type_df.head() # df_encoding=pd.concat([df_encoding.drop('fuel_type', axis=1), fuel_type_df], axis=1) # df_encoding.head() # ### gear_box_type df["gear_box_type"].unique() gear_box_type_encoded = ordinalEncoder.fit_transform(df[["gear_box_type"]]) df_encoding["gear_box_type"] = gear_box_type_encoded # # > OneHot Encoder # # gear_box_type_encoded = hotEncoder.fit_transform(df[['gear_box_type']]) # gear_box_type_df = pd.DataFrame(gear_box_type_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['gear_box_type'].index) # gear_box_type_df.head() # df_encoding=pd.concat([df_encoding.drop('gear_box_type', axis=1), gear_box_type_df], axis=1) # df_encoding.head() df_encoding.info() # ### drive_wheels df["drive_wheels"].value_counts() drive_wheels_encoded = ordinalEncoder.fit_transform(df[["drive_wheels"]]) df_encoding["drive_wheels"] = drive_wheels_encoded # # > OneHot Encoder # # drive_wheels_encoded = hotEncoder.fit_transform(df[['drive_wheels']]) # drive_wheels_df = pd.DataFrame(drive_wheels_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df['drive_wheels'].index) # drive_wheels_df.head() # df_encoding=pd.concat([df_encoding.drop('drive_wheels', axis=1), drive_wheels_df], axis=1) # df_encoding.head() # ### wheel df["wheel"].unique() df_encoding["wheel"] = df["wheel"].map({"Left wheel": 0, "Right-hand drive": 1}) df_encoding.head() df_encoding.head() # ### color df_encoding.drop(["color"], axis=1, inplace=True) df_encoding.head(5) # ## ML Model df_cleaned = df_encoding.copy() df_cleaned.drop_duplicates(inplace=True) df_cleaned.head(5) # ### Train and test split x = df_cleaned.drop("price", axis=1) y = df_cleaned["price"] x.shape, y.shape X_train, X_test, y_train, y_test =
train_test_split( x, y, test_size=0.2, random_state=42 ) X_train y_train # ### Scaler scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) def caluc_acc(model): model_y_pred = model.predict(X_test) train_score = model.score(X_train, y_train) print(f"the accuracy of training is: {train_score*100}") model_accuracy = ( round(r2_score(y_test, model_y_pred), 3) * 100 ) # use r2_score as the regression metric print("{:s} : {:.0f}%".format(model.__class__.__name__, model_accuracy)) # ### **Linear** *Regression* lr_model = LinearRegression() lr_model.fit(X_train, y_train) y_pred = lr_model.predict(X_test) y_pred # y_pred = lr_model.predict(X_tesy) # lr_accuracy =round(r2_score(y_test, y_pred),3)*100 # use r2_score as the regression metric train_score = lr_model.score(X_train, y_train) test_score = lr_model.score(X_test, y_test) print(f"the accuracy of training is: {train_score*100}") print(f"the accuracy of testing is: {test_score}") # ### RandomForestRegressor # rfr = RandomForestRegressor(n_estimators=180) rfr.fit(X_train, y_train) train_score = rfr.score(X_train, y_train) test_score = rfr.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") scores = cross_val_score(rfr, X_train, y_train, scoring="r2", cv=5) scores # ### XGB Regressor xgb = XGBRegressor(n_estimators=150, learning_rate=0.2) xgb.fit(X_train, y_train) yhat = xgb.predict(X_test) train_score = xgb.score(X_train, y_train) test_score = xgb.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") # ### Grid Search param_grid = { "learning_rate": [0.1, 0.01, 0.001], "max_depth": [3, 5, 7], "n_estimators": [100, 500, 1000], } grid_search = GridSearchCV( estimator=XGBRegressor(), param_grid=param_grid, scoring="neg_mean_squared_error", cv=5, ) grid_search.fit(X_train, y_train) # X and y are your training data best_params = grid_search.best_params_ best_model = grid_search.best_estimator_ best_params best_model y_pred = best_model.predict(X_test) # X_test is your test data print("MAE: %.3f" % mean_absolute_error(y_test, y_pred)) # ### Best Model xgb = XGBRegressor(n_estimators=800, learning_rate=0.1, max_depth=5) xgb.fit(X_train, y_train) yhat = xgb.predict(X_test) train_score = xgb.score(X_train, y_train) test_score = xgb.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") mae = mean_absolute_error(y_test, yhat) print("MAE: %.3f" % mae) mse = mean_squared_error(y_test, yhat) mse np.sqrt(mse) 1976.6206567153217 / df["price"].sum() mean_absolute_percentage_error(yhat, y_test) r2_score(yhat, y_test) output_2 = pd.DataFrame({"Price": yhat, "real_price": y_test}) output_2 scores = cross_val_score(xgb, x, y, scoring="r2", cv=10) scores plt.scatter(yhat, y_test) plt.xlabel("Predicted values") plt.ylabel("Actual values") plt.show()
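# Added example (not part of the original notebook): when cross-validating, as with
# cross_val_score above, it is safer to put the scaler and the model into one Pipeline so the
# scaler is re-fit on each training fold only; note also that sklearn metrics such as r2_score
# expect (y_true, y_pred) in that order. A minimal sketch, assuming x and y from the cells
# above are available:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor

pipe = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("model", XGBRegressor(n_estimators=800, learning_rate=0.1, max_depth=5)),
    ]
)
cv_r2 = cross_val_score(pipe, x, y, scoring="r2", cv=5)  # R^2 on each of 5 validation folds
print(cv_r2.mean(), cv_r2.std())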
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/183/129183978.ipynb
car-price-prediction-challenge
deepcontractor
[{"Id": 129183978, "ScriptId": 38405193, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13075973, "CreationDate": "05/11/2023 15:59:31", "VersionNumber": 1.0, "Title": "notebook31a99b6071", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 862.0, "LinesInsertedFromPrevious": 862.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185005875, "KernelVersionId": 129183978, "SourceDatasetVersionId": 3909829}]
[{"Id": 3909829, "DatasetId": 2322277, "DatasourceVersionId": 3965025, "CreatorUserId": 3682357, "LicenseName": "CC0: Public Domain", "CreationDate": "07/06/2022 11:38:32", "VersionNumber": 1.0, "Title": "Car Price Prediction Challenge", "Slug": "car-price-prediction-challenge", "Subtitle": "A dataset to practice regression by predicting the prices of different cars.", "Description": "## Assignment \n\nYour notebooks must contain the following steps:\n\n- Perform data cleaning and pre-processing.\n - What steps did you use in this process and how did you clean your data.\n- Perform exploratory data analysis on the given dataset.\n - Explain each and every graphs that you make.\n- Train a ml-model and evaluate it using different metrics.\n - Why did you choose that particular model? What was the accuracy?\n- Hyperparameter optimization and feature selection is a plus.\n- Model deployment and use of ml-flow is a plus.\n- Perform model interpretation and show feature importance for your model.\n - Provide some explanation for the above point.\n- Future steps.\nNote: try to have your notebooks as presentable as possible.\n\n## Dataset Description\n\nCSV file - 19237 rows x 18 columns (Includes Price Columns as Target)\n\n## Attributes\nID\nPrice: price of the care(Target Column)\nLevy\nManufacturer\nModel\nProd. year\nCategory\nLeather interior\nFuel type\nEngine volume\nMileage\nCylinders\nGear box type\nDrive wheels\nDoors\nWheel\nColor\nAirbags\n\nConfused or have any doubts in the data column values? Check the dataset discussion tab!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2322277, "CreatorUserId": 3682357, "OwnerUserId": 3682357.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3909829.0, "CurrentDatasourceVersionId": 3965025.0, "ForumId": 2349083, "Type": 2, "CreationDate": "07/06/2022 11:38:32", "LastActivityDate": "07/06/2022", "TotalViews": 48686, "TotalDownloads": 6518, "TotalVotes": 130, "TotalKernels": 70}]
[{"Id": 3682357, "UserName": "deepcontractor", "DisplayName": "Deep Contractor", "RegisterDate": "09/09/2019", "PerformanceTier": 4}]
# ## Libraries import numpy as np import scipy.stats as st import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px import missingno import datetime as dt import category_encoders as ce from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import OrdinalEncoder from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression from xgboost import XGBRegressor from sklearn.metrics import ( mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, ) from sklearn.model_selection import cross_val_score from sklearn.metrics import r2_score from sklearn.model_selection import GridSearchCV # ### ***Helper funcation tom detect outliers*** def find_outliers_IQR(df): q1 = df.quantile(0.25) q3 = df.quantile(0.75) IQR = q3 - q1 outliers = df[(df < (q1 - 1.5 * IQR)) | (df > (q3 + 1.5 * IQR))] return outliers # # Reading and Understanding the data sns.set_style("darkgrid") df = pd.read_csv( "/kaggle/input/car-price-prediction-challenge/car_price_prediction.csv" ) df.head() df.shape df.info() df.rename(columns={"Prod. year": "year"}, inplace=True) df.head() plt.figure(figsize=(8, 8)) sns.countplot(x=df.dtypes) plt.title("Datatypes of columns in the car dataset") plt.show() print("Count of the datatypes of columns") print(df.dtypes.value_counts()) df.isnull().sum() # checking whether there are null values in the dataset missingno.bar(df) # # Data Preprocessing: df.columns = df.columns.str.lower().str.replace(" ", "_") df.head() df.shape df.describe() df.duplicated().sum() # to find the number of duplicate rows df.loc[df.duplicated()] df.drop_duplicates(inplace=True) df.drop("id", axis=1, inplace=True) print(f"This DataSet Contains {df.shape[0]} rows & {df.shape[1]} columns.") df.shape df.info() # ## Columns in the dataset: df.columns # ### Doors df["doors"] df["doors"].value_counts() # replacing the dates with the number of doors present in the car df["doors"] = df["doors"].str.replace("04-May", "4-5") df["doors"] = df["doors"].str.replace("02-Mar", "2-3") df["doors"].value_counts() # ### Levy df["levy"] # replace '-' with '0' df["levy"] = df["levy"].replace(["-"], ["0"]) df["levy"] = df["levy"].astype(str).astype(int) df["levy"] # ### Engine Volume df["engine_volume"] df["engine_volume"].unique() df["engine_volume"].value_counts() # Remove the string 'Turbo'. # Convert the datatype of the column from object to float. df["turbo"] = df["engine_volume"].str.contains("Turbo").map({False: 0, True: 1}) df["engine_volume"] = df["engine_volume"].str.replace("Turbo", "") df["engine_volume"] = df["engine_volume"].astype(str).astype(float) df["engine_volume"] df["turbo"] # ### Mileage df["mileage"] # Remove the string 'KM'. # Convert it to 'int' datatype. 
df["mileage"] = df["mileage"].str.replace("km", "") df["mileage"] = df["mileage"].astype(str).astype(int) df["mileage"] # ### Price df["price"] sns.boxplot(df["price"]) price_outliers = find_outliers_IQR(df["price"]) df.drop(price_outliers.index, inplace=True) sns.kdeplot(df["price"]) sns.boxplot(df["price"]) med = df["price"].median() df.loc[find_outliers_IQR(df["price"]).index, "price"] = med df["price"] # ## Exploratory Data Analysis: # ### Univariate Analysis: # #### Levy sns.boxplot(df["levy"]) plt.title("Box Plot before median imputation") plt.show() med = df["levy"].median() df.loc[find_outliers_IQR(df["levy"]).index, "levy"] = med df["levy"] sns.boxplot(df["levy"]) plt.title("Box Plot after median imputation") plt.show() df.drop(find_outliers_IQR(df["levy"]).index, inplace=True) # #### engine_volume df["engine_volume"].value_counts().sort_values(ascending=False).head(10) plt.figure(figsize=(19, 10)) c1 = sns.countplot(x="engine_volume", data=df, palette="mako") plt.xticks(rotation=45) c1.bar_label(c1.containers[0], size=10) plt.show() # * Majority of the cars produced has a 2L engine volume. sns.kdeplot(data=df, x="engine_volume", fill=True) plt.axvline(np.mean(df["engine_volume"]), linestyle="--", c="r", label="Mean") plt.axvline(np.median(df["engine_volume"]), linestyle="--", c="g", label="Median") plt.legend() plt.show() # * The values present in the Engine volume column are positively skewed. "mean sensitive to outlier" sns.boxplot(x=df["engine_volume"]) plt.title("Box Plot before median imputation") plt.show() engine_volume_outliers = find_outliers_IQR(df["engine_volume"]) med = df["engine_volume"].median() df.loc[engine_volume_outliers.index, "engine_volume"] = med sns.boxplot(x=df["engine_volume"]) plt.title("Box Plot after median imputation") plt.show() # #### Turbo turbo_values = df["turbo"].value_counts() turbo_values explode = [0.2, 0] location = ["YES", "No"] plt.pie( turbo_values, labels=location, colors=sns.color_palette("mako"), autopct="%.0f%%", explode=explode, shadow=True, rotatelabels="true", ) plt.show() # #### Mileage df.mileage df.mileage.describe() Mileage = df.mileage.value_counts().head(20) Mileage plt.figure(figsize=(15, 6)) plt.title("Total Number of Cars Mileage(in KM)", fontsize=15) c1 = sns.barplot(x=Mileage.index, y=Mileage, palette="mako") c1.set( xlabel="Mileage", ylabel="Number of Cars", ) plt.show() sns.boxplot(x=df.mileage) plt.show() sns.boxplot(x=df["mileage"]) plt.title("Box Plot before median imputation") plt.show() mileage_outliers = find_outliers_IQR(df["mileage"]) med = df["mileage"].median() df.loc[mileage_outliers.index, "mileage"] = med sns.boxplot(x=df["mileage"]) plt.title("Box Plot after median imputation") plt.show() df.drop(find_outliers_IQR(df["mileage"]).index, inplace=True) sns.boxplot(x=df["mileage"]) plt.title("Box Plot after drop") plt.show() # #### Cylinders df["cylinders"] df["cylinders"].value_counts() plt.figure(figsize=(15, 6)) plt.title("Count of Cars Cylinders", fontsize=15) c1 = sns.countplot(x="cylinders", data=df, palette="mako") c1.set(xlabel="Cylinders", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=10) plt.show() # * cllinders of 4 is the most frequency # #### Airbags df["airbags"] df["airbags"].unique() df["airbags"].value_counts() plt.title("Count of Cars AirBags", fontsize=15) c1 = sns.countplot(x="airbags", data=df, palette="mako") c1.set(xlabel="Air bags", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=10) plt.show() sns.boxplot(df["airbags"]) # ### Categorical Features 
print("Categorical Variables:") print(df.select_dtypes("object").columns.tolist()) # #### manufacturer df["manufacturer"] df["manufacturer"].unique() Manufacture = df["manufacturer"].value_counts().head(20) plt.figure(figsize=(10, 6)) plt.tight_layout(pad=3) plt.title("Cars Produced by the Manufacturer", fontsize=15) sns.set_style("darkgrid") a = sns.barplot(x=Manufacture.index, y=Manufacture, palette="mako") a.set_xticklabels(Manufacture.index, rotation=90) a.set(xlabel="Manufacturer", ylabel="Number of Cars") plt.show() ax2 = px.treemap( df, path=["manufacturer"], title="Popularity of the Popular Manufacturer:" ) ax2.show() # #### Model Model = df["model"].value_counts().head(20) Model plt.title("Models Produced", fontsize=15) plt.tight_layout(w_pad=15) a = sns.barplot(x=Model.index, y=Model, palette="mako") a.set_xticklabels(Model.index, rotation=90) a.set(xlabel="Model", ylabel="Number of Cars") a.bar_label(a.containers[0], size=10) plt.show() # #### Category df["category"] df["category"].unique() Category = df["category"].value_counts() Category plt.title("Categories of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.barplot(x=Category.index, y=Category, palette="mako") ax.set_xticklabels(Category.index, rotation=45) ax.set(xlabel="Category", ylabel="Number of Cars") ax.bar_label(ax.containers[0]) plt.show() # #### Leather interior df["leather_interior"].value_counts() plt.title("Leather Interior of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="leather_interior", palette="mako") ax.set(ylabel="Number of Cars") ax.bar_label(ax.containers[0]) plt.show() leather_Interior = ["Yes", "No"] plt.figure(figsize=(6, 5)) plt.title("Leather Interiored or Not(Yes/No)") plt.pie( df["leather_interior"].value_counts(), autopct="%1.2f%%", labels=leather_Interior, explode=(0.1, 0.1), ) plt.show() fig, axes = plt.subplots(1, 2, figsize=(12, 6)) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="leather_interior", palette="mako", ax=axes[0]) ax.set(ylabel="Number of Cars") ax.bar_label(ax.containers[0]) ax.title.set_text("Leather Interior of Cars") leather_Interior = ["Yes", "No"] plt.title("Leather Interiored or Not(Yes/No)") axes[1].pie( df["leather_interior"].value_counts(), autopct="%1.2f%%", labels=leather_Interior, explode=(0.1, 0.1), ) plt.show() # #### Fuel Type fuel_type = df["fuel_type"].value_counts() fuel_type plt.title("Fuel type of Cars", fontsize=20) plt.tight_layout(pad=5) ax = sns.countplot(data=df, x="fuel_type", palette="mako") ax.set(xlabel="Fuel Typey", label="Number of Cars") ax.bar_label(ax.containers[0]) ax.set_xticklabels(fuel_type.index, rotation=45) plt.show() df["drive_wheels"].value_counts() labels = df["drive_wheels"].unique().tolist() sizes = df["drive_wheels"].value_counts().tolist() colors = ["lightcyan", "cyan", "royalblue", "darkblue"] colors = ["#66b3ff", "#205bc0", "#94b5ef"] fig = px.pie( df, values=sizes, names=labels, title="Percentage of the Types of Drive Wheels present in Cars", color_discrete_sequence=colors, hole=0.3, ) fig.update_traces(textposition="inside", textinfo="percent+label") fig.update_layout(margin=dict(b=0, l=0, r=0)) fig.show() # #### Doors plt.title("No of Doors in Cars", fontsize=15) doors = df["doors"].value_counts() c1 = sns.countplot(data=df, x="doors", palette="mako") c1.set(xlabel="Doors", ylabel="No of Cars") c1.bar_label(c1.containers[0], size=12) plt.show() # #### Wheels df["wheel"].value_counts() labels = df["wheel"].unique().tolist() sizes = df["wheel"].value_counts().tolist() fig = px.pie( df, 
values=sizes, names=labels, title="Percentage of the Types of Wheels present in Cars", color_discrete_sequence=px.colors.sequential.Blues_r, ) fig.update_traces(textposition="inside", textinfo="percent+label") fig.update_layout(margin=dict(b=0, l=0, r=0)) fig.show() # #### Gear box type df["gear_box_type"].value_counts() plt.title("Gear box type", fontsize=15) c1 = sns.countplot(x=df["gear_box_type"], palette="mako") c1.bar_label(c1.containers[0], size=12) c1.set(xlabel="Gear Box Type", ylabel="No of cars") plt.show() # * Most of the models have a Automatic kind of gear box. # ## Multivariate Analysis plt.figure(figsize=(14, 10)) plot = sns.heatmap( abs(df.select_dtypes("number").corr()), annot=True, cmap="mako", vmin=-1 ) plt.title( "Correlation Heatmap of Car price prediction Dataset", weight="bold", fontsize=15 ) plot.set_xticklabels(plot.get_xticklabels(), rotation=0, horizontalalignment="center") plot.set_yticklabels(plot.get_yticklabels(), rotation=0, horizontalalignment="right") plt.show() # * There is no strong correlation among the variables . df[["price", "cylinders"]].corr() Target_corr = ( df.select_dtypes("number") .corr() .loc[:, "price"] .to_frame() .sort_values(by="price", ascending=False) ) plot = sns.heatmap(Target_corr, annot=True, cmap="mako", vmin=-1) plt.title("Correlation Heatmap of Car price prediction Dataset", weight="bold") plot.set_xticklabels(plot.get_xticklabels(), rotation=0, horizontalalignment="center") plot.set_yticklabels(plot.get_yticklabels(), rotation=0, horizontalalignment="right") plt.show() df.groupby("manufacturer")["mileage"].max().sort_values(ascending=False) df.groupby("model")["mileage"].max().sort_values(ascending=False) # ### Year wise variety of Cars: df_wise_year = ( df.groupby(by=["year"], as_index=False) .count() .sort_values(by="year", ascending=False)[["year", "price"]] ) df_wise_year = df_wise_year.rename(columns={"price": "count"}) df_wise_year["prd_yr_per"] = round( df_wise_year["count"] / sum(df_wise_year["count"]) * 100, 2 ) df_wise_year plt.figure(figsize=(18, 8)) plot = sns.barplot(x="year", y="count", data=df_wise_year, palette="Blues") plt.title("Year wise variety of cars", fontsize=16) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plot.bar_label(container=plot.containers[0]) plt.xlabel("Year of production", fontsize=12) plt.ylabel("Number of car varieties", fontsize=12) plt.show() # * The quantity of car variants each year gradualy increases upto 2015 after which we get to witness a decline. # ### Manufacturer's variety of cars: df_wise_manu = ( df.groupby(by=["manufacturer"], as_index=False) .count() .sort_values(by="price", ascending=False)[["manufacturer", "price"]] ) df_wise_manu = df_wise_manu.rename(columns={"price": "count"}) df_wise_manu plot = sns.barplot( x="manufacturer", y="count", data=df_wise_manu.head(20), palette="Blues_r" ) plt.title("manufacturer wise variety of cars", fontsize=16) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plt.xlabel("manufacturer of production", fontsize=12) plt.ylabel("Number of car varieties", fontsize=12) plt.show() # * Hyundai , Toyota and Mercedes-Benz have the highest variants of cars. 
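# The target-correlation heatmap above can also be produced more compactly with pandas' corrwith; the two lines below are only an equivalent sketch of the same computation, not an addition to the analysis:
num_feats = df.select_dtypes("number").drop(columns=["price"])
# Rank numeric features by the absolute value of their correlation with price.
num_feats.corrwith(df["price"]).abs().sort_values(ascending=False)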
# ### Mean Price wise analysis of each feature lcv = df.select_dtypes("object") for column in lcv: print(column) print( df[["price", column]] .groupby(by=[column], as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) lcv.drop(columns=["manufacturer", "model"], inplace=True) def annot_percent(axes): for p in plot.patches: total = sum(p.get_height() for p in plot.patches) / 100 percent = round((p.get_height() / total), 2) x = p.get_x() + p.get_width() / 2 y = p.get_height() * 1.015 plot.annotate(f"{percent}%", (x, y), ha="center", va="bottom") plt.figure(figsize=(15, 20)) for n, column in enumerate(lcv): plot = plt.subplot(4, 2, n + 1) c1 = sns.barplot(x=df[column], y=df["price"], palette="Blues_r", errorbar=None) plot.set_xticklabels( plot.get_xticklabels(), rotation=30, horizontalalignment="center" ) plt.title(f"{column.title()}", weight="bold") plt.tight_layout() # Add annotations to the bar plot annot_percent(plot) # * The average price of Diesel car is maximum and CNG is the minimum. # * The average price of 4x4,Front and Rear drives wheels are almost similar. # * 2-3 Doors has the highest average maximum price. # * Left wheel drive has the maximum average price. df_price_wise_manu = ( df[["price", "manufacturer"]] .groupby(by="manufacturer", as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) df_price_wise_manu c1 = sns.barplot( data=df_price_wise_manu.head(20), x="manufacturer", y="price", palette="Blues_r" ) c1.set_xticklabels(c1.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("manufacturer", weight="bold") plt.tight_layout() df_price_wise_model = ( df[["price", "model"]] .groupby(by="model", as_index=False) .agg("mean") .sort_values(by="price", ascending=False) ) df_price_wise_model c1 = sns.barplot( data=df_price_wise_model.head(20), x="model", y="price", palette="Blues_r" ) c1.set_xticklabels(c1.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("model", weight="bold") plt.tight_layout() # ### Average Production Price for each year: price_wise_years = ( df.groupby("year")["price"].agg("mean").sort_values(ascending=False).to_frame() ) price_wise_years.head(10) plt.figure(figsize=(18, 8)) plot = sns.barplot(x="year", y="price", data=df, palette="Blues", errorbar=None) plot.set_xticklabels(plot.get_xticklabels(), rotation=90, horizontalalignment="center") plt.title("Average price for each production year", fontsize=16) plt.xlabel("Production year", fontsize=15) plt.ylabel("Mean Price", fontsize=15) plt.show() px.bar(data_frame=df, x="year", y="price") # # * From the given plot we can infer that there seems to be general increase in the average price of the car each year. # # px.line(data_frame=df,x='year',y='price') sns.lineplot(data=df, x="year", y="price") # ### Comparison of year wise mean price and individual features plt.figure(figsize=(18, 5)) for n, column in enumerate(lcv): sns.lineplot(data=df, x="year", y="price", hue=column, errorbar=None) plt.title( f"Year wise variety mean price of cars vs {column}", fontsize=16, weight="bold" ) plt.xlabel("Year of production", fontsize=12) plt.ylabel("Mean price", fontsize=12) plt.show() # #### From the above plots we can infer that: # * The average price of tiptronic engines have risen due to a surge in demand over the last few years. # * Leather interior's average price is always high. # * There is also an increase in the average price of diesel engines. 
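# The annot_percent helper above works only because it reads the module-level `plot` variable (its `axes` argument is unused) and it recomputes the total on every loop iteration. A self-contained variant (a sketch, not the author's code) that annotates whichever Axes it receives:
def annot_percent_on(ax):
    # Label every bar with its share of the summed bar heights, as a percentage.
    total = sum(p.get_height() for p in ax.patches)
    for p in ax.patches:
        percent = round(p.get_height() / total * 100, 2)
        x = p.get_x() + p.get_width() / 2
        y = p.get_height() * 1.015
        ax.annotate(f"{percent}%", (x, y), ha="center", va="bottom")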
df.columns # ## Feature Engineering # df_encoding = df.copy() df_encoding.shape targetEncod = ce.LeaveOneOutEncoder() ordinalEncoder = OrdinalEncoder() hotEncoder = OneHotEncoder(sparse_output=False, drop="first") # # * I am using a leave-one-out target encoder for categorical features that have more than 15 unique values. # * For features with fewer than 15 unique values I show two options, OrdinalEncoder and OneHotEncoder. # * The OrdinalEncoder gave me better accuracy than the OneHotEncoder, # * but I leave the OneHotEncoder code in (commented out) for anyone who wants it. # df.groupby("manufacturer").mean()["price"] # ### manufacturer df_encoding["manufacturer"] = targetEncod.fit_transform(df["manufacturer"], df["price"]) df_encoding # ### Model df_encoding["model"] = targetEncod.fit_transform(df["model"], df["price"]) df_encoding.head() # ### category df_encoding["category"].unique() ## Ordinal Encoder category_encoded = ordinalEncoder.fit_transform(df_encoding[["category"]]) category_encoded df_encoding["category"] = category_encoded # # > OneHot Encoder # # category_df = pd.DataFrame(category_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['category'].index) # category_df.head() # df_encoding=pd.concat([df_encoding.drop('category', axis=1), category_df], axis=1) # df_encoding.head() # ### leather_interior, doors df_encoding["leather_interior"] = df["leather_interior"].map({"Yes": 1, "No": 0}) df_encoding["doors"] = df["doors"].map({"4-5": 4, "2-3": 2, ">5": 5}) df_encoding.head() # ### fuel_type df["fuel_type"].unique() fuel_type_encoded = ordinalEncoder.fit_transform(df[["fuel_type"]]) fuel_type_encoded df_encoding["fuel_type"] = fuel_type_encoded # # > OneHot Encoder # # fuel_type_df = pd.DataFrame(fuel_type_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['fuel_type'].index) # fuel_type_df.head() # df_encoding=pd.concat([df_encoding.drop('fuel_type', axis=1), fuel_type_df], axis=1) # df_encoding.head() # ### gear_box_type df["gear_box_type"].unique() gear_box_type_encoded = ordinalEncoder.fit_transform(df[["gear_box_type"]]) df_encoding["gear_box_type"] = gear_box_type_encoded # # > OneHot Encoder # # gear_box_type_encoded = hotEncoder.fit_transform(df[['gear_box_type']]) # gear_box_type_df = pd.DataFrame(gear_box_type_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df_encoding['gear_box_type'].index) # gear_box_type_df.head() # df_encoding=pd.concat([df_encoding.drop('gear_box_type', axis=1), gear_box_type_df], axis=1) # df_encoding.head() df_encoding.info() # ### drive_wheels df["drive_wheels"].value_counts() drive_wheels_encoded = ordinalEncoder.fit_transform(df[["drive_wheels"]]) df_encoding["drive_wheels"] = drive_wheels_encoded # # > OneHot Encoder # # drive_wheels_encoded = hotEncoder.fit_transform(df[['drive_wheels']]) # drive_wheels_df = pd.DataFrame(drive_wheels_encoded, columns=hotEncoder.categories_[0].tolist()[1:],index=df['drive_wheels'].index) # drive_wheels_df.head() # df_encoding=pd.concat([df_encoding.drop('drive_wheels', axis=1), drive_wheels_df], axis=1) # df_encoding.head() # ### wheel df["wheel"].unique() df_encoding["wheel"] = df["wheel"].map({"Left wheel": 0, "Right-hand drive": 1}) df_encoding.head() # ### color df_encoding.drop(["color"], axis=1, inplace=True) df_encoding.head(5) # ## ML Model df_cleaned = df_encoding.copy() df_cleaned.drop_duplicates(inplace=True) df_cleaned.head(5) # ### Train and test split x = df_cleaned.drop("price", axis=1) y = df_cleaned["price"] x.shape, y.shape X_train, X_test, y_train, y_test =
train_test_split( x, y, test_size=0.2, random_state=42 ) X_train y_train # ### Scaler scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) def caluc_acc(model): model_y_pred = model.predict(X_test) train_score = model.score(X_train, y_train) print(f"the accuracy of training is: {train_score*100}") model_accuracy = ( round(r2_score(y_test, model_y_pred), 3) * 100 ) # use r2_score as the regression metric print("{:s} : {:.0f}%".format(model.__class__.__name__, model_accuracy)) # ### **Linear** *Regression* lr_model = LinearRegression() lr_model.fit(X_train, y_train) y_pred = lr_model.predict(X_test) y_pred # y_pred = lr_model.predict(X_tesy) # lr_accuracy =round(r2_score(y_test, y_pred),3)*100 # use r2_score as the regression metric train_score = lr_model.score(X_train, y_train) test_score = lr_model.score(X_test, y_test) print(f"the accuracy of training is: {train_score*100}") print(f"the accuracy of testing is: {test_score}") # ### RandomForestRegressor # rfr = RandomForestRegressor(n_estimators=180) rfr.fit(X_train, y_train) train_score = rfr.score(X_train, y_train) test_score = rfr.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") scores = cross_val_score(rfr, X_train, y_train, scoring="r2", cv=5) scores # ### XGB Regressor xgb = XGBRegressor(n_estimators=150, learning_rate=0.2) xgb.fit(X_train, y_train) yhat = xgb.predict(X_test) train_score = xgb.score(X_train, y_train) test_score = xgb.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") # ### Grid Search param_grid = { "learning_rate": [0.1, 0.01, 0.001], "max_depth": [3, 5, 7], "n_estimators": [100, 500, 1000], } grid_search = GridSearchCV( estimator=XGBRegressor(), param_grid=param_grid, scoring="neg_mean_squared_error", cv=5, ) grid_search.fit(X_train, y_train) # X and y are your training data best_params = grid_search.best_params_ best_model = grid_search.best_estimator_ best_params best_model y_pred = best_model.predict(X_test) # X_test is your test data print("MAE: %.3f" % mean_absolute_error(y_test, y_pred)) # ### Best Model xgb = XGBRegressor(n_estimators=800, learning_rate=0.1, max_depth=5) xgb.fit(X_train, y_train) yhat = xgb.predict(X_test) train_score = xgb.score(X_train, y_train) test_score = xgb.score(X_test, y_test) print(f"the accuracy of training is: {train_score}") print(f"the accuracy of testing is: {test_score}") mae = mean_absolute_error(y_test, yhat) print("MAE: %.3f" % mae) mse = mean_squared_error(y_test, yhat) mse np.sqrt(mse) 1976.6206567153217 / df["price"].sum() mean_absolute_percentage_error(yhat, y_test) r2_score(yhat, y_test) output_2 = pd.DataFrame({"Price": yhat, "real_price": y_test}) output_2 scores = cross_val_score(xgb, x, y, scoring="r2", cv=10) scores plt.scatter(yhat, y_test) plt.xlabel("Predicted values") plt.ylabel("Actual values") plt.show()
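# Two caveats about the pipeline above: the LeaveOneOutEncoder is fitted on the full dataset before the train/test split, which can leak target information into the test rows, and sklearn metrics expect (y_true, y_pred), whereas the final mean_absolute_percentage_error and r2_score calls pass the predictions first. The sketch below is one leakage-safe, order-correct variant; it is an illustration that target-encodes every remaining categorical column instead of reproducing the notebook's mixed ordinal/one-hot scheme, and it assumes the cleaned `df` from the cells above.
import category_encoders as ce
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, r2_score
from xgboost import XGBRegressor

X = df.drop(columns=["price"])
y = df["price"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit the target encoder on the training fold only, then apply it to the test fold.
cat_cols = X_train.select_dtypes("object").columns.tolist()
encoder = ce.LeaveOneOutEncoder(cols=cat_cols)
X_train = encoder.fit_transform(X_train, y_train)
X_test = encoder.transform(X_test)

# Scale after encoding, again fitting on the training fold only.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

model = XGBRegressor(n_estimators=800, learning_rate=0.1, max_depth=5)
model.fit(X_train, y_train)
pred = model.predict(X_test)
print("R2 :", r2_score(y_test, pred))  # y_true first, then y_pred
print("MAE:", mean_absolute_error(y_test, pred))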
[{"car-price-prediction-challenge/car_price_prediction.csv": {"column_names": "[\"ID\", \"Price\", \"Levy\", \"Manufacturer\", \"Model\", \"Prod. year\", \"Category\", \"Leather interior\", \"Fuel type\", \"Engine volume\", \"Mileage\", \"Cylinders\", \"Gear box type\", \"Drive wheels\", \"Doors\", \"Wheel\", \"Color\", \"Airbags\"]", "column_data_types": "{\"ID\": \"int64\", \"Price\": \"int64\", \"Levy\": \"object\", \"Manufacturer\": \"object\", \"Model\": \"object\", \"Prod. year\": \"int64\", \"Category\": \"object\", \"Leather interior\": \"object\", \"Fuel type\": \"object\", \"Engine volume\": \"object\", \"Mileage\": \"object\", \"Cylinders\": \"float64\", \"Gear box type\": \"object\", \"Drive wheels\": \"object\", \"Doors\": \"object\", \"Wheel\": \"object\", \"Color\": \"object\", \"Airbags\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 19237 entries, 0 to 19236\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ID 19237 non-null int64 \n 1 Price 19237 non-null int64 \n 2 Levy 19237 non-null object \n 3 Manufacturer 19237 non-null object \n 4 Model 19237 non-null object \n 5 Prod. year 19237 non-null int64 \n 6 Category 19237 non-null object \n 7 Leather interior 19237 non-null object \n 8 Fuel type 19237 non-null object \n 9 Engine volume 19237 non-null object \n 10 Mileage 19237 non-null object \n 11 Cylinders 19237 non-null float64\n 12 Gear box type 19237 non-null object \n 13 Drive wheels 19237 non-null object \n 14 Doors 19237 non-null object \n 15 Wheel 19237 non-null object \n 16 Color 19237 non-null object \n 17 Airbags 19237 non-null int64 \ndtypes: float64(1), int64(4), object(13)\nmemory usage: 2.6+ MB\n", "summary": "{\"ID\": {\"count\": 19237.0, \"mean\": 45576535.886104904, \"std\": 936591.4227992407, \"min\": 20746880.0, \"25%\": 45698374.0, \"50%\": 45772308.0, \"75%\": 45802036.0, \"max\": 45816654.0}, \"Price\": {\"count\": 19237.0, \"mean\": 18555.92722357956, \"std\": 190581.26968400914, \"min\": 1.0, \"25%\": 5331.0, \"50%\": 13172.0, \"75%\": 22075.0, \"max\": 26307500.0}, \"Prod. year\": {\"count\": 19237.0, \"mean\": 2010.9128242449447, \"std\": 5.668672994387615, \"min\": 1939.0, \"25%\": 2009.0, \"50%\": 2012.0, \"75%\": 2015.0, \"max\": 2020.0}, \"Cylinders\": {\"count\": 19237.0, \"mean\": 4.582991110880075, \"std\": 1.1999331679654894, \"min\": 1.0, \"25%\": 4.0, \"50%\": 4.0, \"75%\": 4.0, \"max\": 16.0}, \"Airbags\": {\"count\": 19237.0, \"mean\": 6.582627228777876, \"std\": 4.320168394922048, \"min\": 0.0, \"25%\": 4.0, \"50%\": 6.0, \"75%\": 12.0, \"max\": 16.0}}", "examples": "{\"ID\":{\"0\":45654403,\"1\":44731507,\"2\":45774419,\"3\":45769185},\"Price\":{\"0\":13328,\"1\":16621,\"2\":8467,\"3\":3607},\"Levy\":{\"0\":\"1399\",\"1\":\"1018\",\"2\":\"-\",\"3\":\"862\"},\"Manufacturer\":{\"0\":\"LEXUS\",\"1\":\"CHEVROLET\",\"2\":\"HONDA\",\"3\":\"FORD\"},\"Model\":{\"0\":\"RX 450\",\"1\":\"Equinox\",\"2\":\"FIT\",\"3\":\"Escape\"},\"Prod. 
year\":{\"0\":2010,\"1\":2011,\"2\":2006,\"3\":2011},\"Category\":{\"0\":\"Jeep\",\"1\":\"Jeep\",\"2\":\"Hatchback\",\"3\":\"Jeep\"},\"Leather interior\":{\"0\":\"Yes\",\"1\":\"No\",\"2\":\"No\",\"3\":\"Yes\"},\"Fuel type\":{\"0\":\"Hybrid\",\"1\":\"Petrol\",\"2\":\"Petrol\",\"3\":\"Hybrid\"},\"Engine volume\":{\"0\":\"3.5\",\"1\":\"3\",\"2\":\"1.3\",\"3\":\"2.5\"},\"Mileage\":{\"0\":\"186005 km\",\"1\":\"192000 km\",\"2\":\"200000 km\",\"3\":\"168966 km\"},\"Cylinders\":{\"0\":6.0,\"1\":6.0,\"2\":4.0,\"3\":4.0},\"Gear box type\":{\"0\":\"Automatic\",\"1\":\"Tiptronic\",\"2\":\"Variator\",\"3\":\"Automatic\"},\"Drive wheels\":{\"0\":\"4x4\",\"1\":\"4x4\",\"2\":\"Front\",\"3\":\"4x4\"},\"Doors\":{\"0\":\"04-May\",\"1\":\"04-May\",\"2\":\"04-May\",\"3\":\"04-May\"},\"Wheel\":{\"0\":\"Left wheel\",\"1\":\"Left wheel\",\"2\":\"Right-hand drive\",\"3\":\"Left wheel\"},\"Color\":{\"0\":\"Silver\",\"1\":\"Black\",\"2\":\"Black\",\"3\":\"White\"},\"Airbags\":{\"0\":12,\"1\":8,\"2\":2,\"3\":0}}"}}]
true
1
<start_data_description><data_path>car-price-prediction-challenge/car_price_prediction.csv: <column_names> ['ID', 'Price', 'Levy', 'Manufacturer', 'Model', 'Prod. year', 'Category', 'Leather interior', 'Fuel type', 'Engine volume', 'Mileage', 'Cylinders', 'Gear box type', 'Drive wheels', 'Doors', 'Wheel', 'Color', 'Airbags'] <column_types> {'ID': 'int64', 'Price': 'int64', 'Levy': 'object', 'Manufacturer': 'object', 'Model': 'object', 'Prod. year': 'int64', 'Category': 'object', 'Leather interior': 'object', 'Fuel type': 'object', 'Engine volume': 'object', 'Mileage': 'object', 'Cylinders': 'float64', 'Gear box type': 'object', 'Drive wheels': 'object', 'Doors': 'object', 'Wheel': 'object', 'Color': 'object', 'Airbags': 'int64'} <dataframe_Summary> {'ID': {'count': 19237.0, 'mean': 45576535.886104904, 'std': 936591.4227992407, 'min': 20746880.0, '25%': 45698374.0, '50%': 45772308.0, '75%': 45802036.0, 'max': 45816654.0}, 'Price': {'count': 19237.0, 'mean': 18555.92722357956, 'std': 190581.26968400914, 'min': 1.0, '25%': 5331.0, '50%': 13172.0, '75%': 22075.0, 'max': 26307500.0}, 'Prod. year': {'count': 19237.0, 'mean': 2010.9128242449447, 'std': 5.668672994387615, 'min': 1939.0, '25%': 2009.0, '50%': 2012.0, '75%': 2015.0, 'max': 2020.0}, 'Cylinders': {'count': 19237.0, 'mean': 4.582991110880075, 'std': 1.1999331679654894, 'min': 1.0, '25%': 4.0, '50%': 4.0, '75%': 4.0, 'max': 16.0}, 'Airbags': {'count': 19237.0, 'mean': 6.582627228777876, 'std': 4.320168394922048, 'min': 0.0, '25%': 4.0, '50%': 6.0, '75%': 12.0, 'max': 16.0}} <dataframe_info> RangeIndex: 19237 entries, 0 to 19236 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 19237 non-null int64 1 Price 19237 non-null int64 2 Levy 19237 non-null object 3 Manufacturer 19237 non-null object 4 Model 19237 non-null object 5 Prod. year 19237 non-null int64 6 Category 19237 non-null object 7 Leather interior 19237 non-null object 8 Fuel type 19237 non-null object 9 Engine volume 19237 non-null object 10 Mileage 19237 non-null object 11 Cylinders 19237 non-null float64 12 Gear box type 19237 non-null object 13 Drive wheels 19237 non-null object 14 Doors 19237 non-null object 15 Wheel 19237 non-null object 16 Color 19237 non-null object 17 Airbags 19237 non-null int64 dtypes: float64(1), int64(4), object(13) memory usage: 2.6+ MB <some_examples> {'ID': {'0': 45654403, '1': 44731507, '2': 45774419, '3': 45769185}, 'Price': {'0': 13328, '1': 16621, '2': 8467, '3': 3607}, 'Levy': {'0': '1399', '1': '1018', '2': '-', '3': '862'}, 'Manufacturer': {'0': 'LEXUS', '1': 'CHEVROLET', '2': 'HONDA', '3': 'FORD'}, 'Model': {'0': 'RX 450', '1': 'Equinox', '2': 'FIT', '3': 'Escape'}, 'Prod. 
year': {'0': 2010, '1': 2011, '2': 2006, '3': 2011}, 'Category': {'0': 'Jeep', '1': 'Jeep', '2': 'Hatchback', '3': 'Jeep'}, 'Leather interior': {'0': 'Yes', '1': 'No', '2': 'No', '3': 'Yes'}, 'Fuel type': {'0': 'Hybrid', '1': 'Petrol', '2': 'Petrol', '3': 'Hybrid'}, 'Engine volume': {'0': '3.5', '1': '3', '2': '1.3', '3': '2.5'}, 'Mileage': {'0': '186005 km', '1': '192000 km', '2': '200000 km', '3': '168966 km'}, 'Cylinders': {'0': 6.0, '1': 6.0, '2': 4.0, '3': 4.0}, 'Gear box type': {'0': 'Automatic', '1': 'Tiptronic', '2': 'Variator', '3': 'Automatic'}, 'Drive wheels': {'0': '4x4', '1': '4x4', '2': 'Front', '3': '4x4'}, 'Doors': {'0': '04-May', '1': '04-May', '2': '04-May', '3': '04-May'}, 'Wheel': {'0': 'Left wheel', '1': 'Left wheel', '2': 'Right-hand drive', '3': 'Left wheel'}, 'Color': {'0': 'Silver', '1': 'Black', '2': 'Black', '3': 'White'}, 'Airbags': {'0': 12, '1': 8, '2': 2, '3': 0}} <end_description>
8,446
0
9,926
8,446
129450907
<jupyter_start><jupyter_text>Electric Vehicle Data 2023 ``` This dataset shows the Battery Electric Vehicles (BEVs) and Plug-in Hybrid Electric Vehicles (PHEVs) that are currently registered through Washington State Department of Licensing (DOL). ``` ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F13364933%2Fcdf43b735468100117dd44a05b80a5e5%2FUntitled-1024--768px-6.jpg?generation=1689176985582069&alt=media) Kaggle dataset identifier: electric-vehicle-population-data <jupyter_script>import pandas as pd import plotly.express as px import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots import seaborn as sns from sklearn.linear_model import LinearRegression import re df = pd.read_csv( "/kaggle/input/electric-vehicle-population-data/Electric_Vehicle_Population_Data.csv" ) # # Dataset comes from Washington State department of motor vehicles. It describes a subset of the electric vehicle population in the united states def createDict(x_val, y_val, text_val, showarrow_val=True): """ Helper function for creating annotations """ return dict(x=x_val, y=y_val, text=text_val, bgcolor=None, showarrow=showarrow_val) def extract_lat_lon(df, column_name): # Create new columns for latitude and longitude df["latitude"] = None df["longitude"] = None # Define the regular expression pattern to match the longitude and latitude values pattern = r"^POINT \((?P<longitude>-?\d+\.\d+) (?P<latitude>-?\d+\.\d+)\)$" # Loop through each row in the DataFrame for i, row in df.iterrows(): # Extract the longitude and latitude values using the regular expression pattern if pd.isna(row[column_name]): continue match = re.match(pattern, row[column_name]) if match: longitude = float(match.group("longitude")) latitude = float(match.group("latitude")) # Assign the latitude and longitude values to the new columns df.at[i, "latitude"] = latitude df.at[i, "longitude"] = longitude extract_lat_lon(df, "Vehicle Location") fig = go.Figure( go.Scattermapbox( lat=df["latitude"], lon=df["longitude"], mode="markers", marker=go.scattermapbox.Marker(size=14), ) ) fig.update_layout( mapbox_style="open-street-map", mapbox=dict(center=dict(lat=37.0902, lon=-95.7129), zoom=3), title="Map of Vehicle Locations", plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_font_color="white", ) fig.update_layout(title_x=0.5) fig.show() # - A graph of locations of all cars in this dataset # - Majority, but not all, are located within the state of Washington top_20_values = df["Make"].value_counts().nlargest(20).index.tolist() df_filtered = df[df["Make"].isin(top_20_values)] fig = px.histogram(df_filtered, x="Make") fig.update_xaxes(categoryorder="total descending") fig.update_layout( title=dict( text="Top 20 count of Vehicles by Make", font=dict(size=24, family="Arial", color="white"), ), xaxis=dict(titlefont=dict(size=18, family="Arial", color="white")), yaxis=dict(title="Count", titlefont=dict(size=18, family="Arial", color="white")), plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_x=0.5, template="plotly_dark", ) fig.update_traces(marker=dict(color="#F374AE")) fig.show() top_20_values = df["Model"].value_counts().nlargest(20).index.tolist() df_filtered = df[df["Model"].isin(top_20_values)] fig = px.histogram(df_filtered, x="Model") fig.update_xaxes(categoryorder="total descending") fig.update_layout( title=dict( text="Top 20 count of Vehicles by Model", font=dict(size=24, family="Arial", color="white"), ), xaxis=dict(titlefont=dict(size=18, family="Arial", color="white")), 
yaxis=dict(title="Count", titlefont=dict(size=18, family="Arial", color="white")), plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_x=0.5, template="plotly_dark", ) fig.update_traces(marker=dict(color="#D90368")) fig.show() # - An overwhelming majority of electric cars are Tesla's. # - It follows an inverse graph and drops off to represent a larger range of vehicles. Many of these cars # - are only partially electric fig = px.histogram(df, x="Model Year") fig.update_xaxes(categoryorder="total descending") fig.update_layout( title=dict( text="Models by Year", font=dict(size=24, family="Arial", color="white") ), xaxis=dict(titlefont=dict(size=18, family="Arial", color="white")), yaxis=dict(title="Count", titlefont=dict(size=18, family="Arial", color="white")), plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_x=0.5, template="plotly_dark", ) fig.update_traces(marker=dict(color="#F6AE2D")) fig.show() # - After the year 2010 the amount of electric cars sold has risen dramatically. It dropped in recent years, however this is likely due to poor data. As you'll see in following graphs, Tesla's are not logged after 2021, depsite continuing to sell many cars fig = px.scatter(df, x="Model Year", y="Electric Range") fig.update_layout( title=dict( text="Electric Mileage by Year", font=dict(size=24, family="Arial", color="white"), ), xaxis=dict(titlefont=dict(size=18, family="Arial", color="white")), yaxis=dict( title="Electric Mileage", titlefont=dict(size=18, family="Arial", color="white") ), plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_x=0.5, template="plotly_dark", ) fig.update_traces(marker=dict(color="#2191FB")) fig.show() # - Theres a large range in the electric mileage by year. This is due to there being a large amount of # - vehicles that are partially electric. avg_mileage = ( df.groupby("Model Year")["Electric Range"].agg(["mean", "std"]).reset_index() ) fig = px.scatter(avg_mileage, x="Model Year", y="mean", trendline="ols", error_y="std") trendlineShape = { "type": "line", "x0": 2012, "x1": 2020, "y0": 51, "y1": 211, "line": {"color": "red", "width": 2, "dash": "dash"}, } circleShape = { "type": "circle", "xref": "x", "yref": "y", "x0": 2021, "y0": 50, "x1": 2023, "y1": 325, "line": {"color": "orange", "width": 2, "dash": "dot"}, } fig.update_layout( title=dict( text="Electric Range by Year", font=dict(size=24, family="Arial", color="white") ), xaxis=dict(titlefont=dict(size=18, family="Arial", color="white")), yaxis=dict(title="Count", titlefont=dict(size=18, family="Arial", color="white")), plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_x=0.5, template="plotly_dark", shapes=[trendlineShape, circleShape], annotations=[ createDict("2015", 120, "Rapid advancement by about 20 miles per year"), createDict("2021", 325, "Why the sudden decrease?"), ], ) fig.update_traces(marker=dict(color="#3CBBB1")) fig.show() # The average range for electric vehicles is going up, but what's going on in 2021,2022, and 2023? # Why is there such a drop. Could there be missing data? # Tesla is a leader in the electric car market. Let's see what's going on with tesla in these year median = ( df.where((df["Make"] == "TESLA") & (df["Model Year"].isin([2021, 2022, 2023]))) .dropna()["Electric Range"] .mean() ) print( f"The electric range for Tesla's in 2021,2022, and 2023 are {median}. This is what's causing the skew" ) # - Tesla's in this dataset for 2021,2022, and 2023 have mileage of 0! 
What's going on? avg_mileage_by_model = ( df.groupby(["Make", "Model"])["Electric Range"].agg(["mean"]).reset_index() ) top_10 = avg_mileage_by_model.sort_values("mean", ascending=False).head(10) fig = go.Figure( data=[ go.Table( header=dict( values=["Make", "Model", "Electric Range"], font=dict(color="white"), line_color="#858AE3", fill_color="#858AE3", ), cells=dict( values=[top_10["Make"], top_10["Model"], top_10["mean"]], format=[None, None, ".0f"], line_color="black", fill_color="#1E1E24", font=dict(color="white"), ), ) ] ) fig.update_layout( title="Electric Range by Model", title_x=0.5, height=500, width=800, plot_bgcolor="#1E1E24", paper_bgcolor="#1E1E24", title_font=dict(color="white"), ) fig.show()
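# The extract_lat_lon helper earlier in this notebook parses the WKT location strings row by row with iterrows; the snippet below is a vectorized sketch of the same idea (it assumes the same "Vehicle Location" strings, e.g. "POINT (-122.3 47.6)", and is not part of the original analysis):
coords = df["Vehicle Location"].str.extract(
    r"POINT \((?P<longitude>-?\d+\.\d+) (?P<latitude>-?\d+\.\d+)\)"
).astype(float)
# Named capture groups become the column names of the extracted frame.
df["longitude"] = coords["longitude"]
df["latitude"] = coords["latitude"]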
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450907.ipynb
electric-vehicle-population-data
utkarshx27
[{"Id": 129450907, "ScriptId": 38490701, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8803610, "CreationDate": "05/14/2023 00:03:17", "VersionNumber": 1.0, "Title": "notebook3bd1ae1c6f", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 348.0, "LinesInsertedFromPrevious": 348.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185514240, "KernelVersionId": 129450907, "SourceDatasetVersionId": 5555167}]
[{"Id": 5555167, "DatasetId": 3199724, "DatasourceVersionId": 5629937, "CreatorUserId": 13364933, "LicenseName": "Other (specified in description)", "CreationDate": "04/29/2023 07:22:29", "VersionNumber": 1.0, "Title": "Electric Vehicle Data 2023", "Slug": "electric-vehicle-population-data", "Subtitle": "EV and PHEV Dataset: Registration and Characteristics", "Description": "```\nThis dataset shows the Battery Electric Vehicles (BEVs) and Plug-in Hybrid Electric Vehicles (PHEVs) that are currently registered through Washington State Department of Licensing (DOL).\n```\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F13364933%2Fcdf43b735468100117dd44a05b80a5e5%2FUntitled-1024--768px-6.jpg?generation=1689176985582069&alt=media)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3199724, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5555167.0, "CurrentDatasourceVersionId": 5629937.0, "ForumId": 3264368, "Type": 2, "CreationDate": "04/29/2023 07:22:29", "LastActivityDate": "04/29/2023", "TotalViews": 11065, "TotalDownloads": 1838, "TotalVotes": 39, "TotalKernels": 8}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
false
1
2,602
2
2,798
2,602
129450316
revenue = input() revenue = input("what is the revenue of your department") print("the entered value is", revenue) type(revenue) First_Name = input("Enter your first Name") Second_Name = input("Enter your Second Name") print("my name is", First_Name, Second_Name)
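# As type(revenue) shows, input() always returns a string; a small follow-on example (illustrative, not part of the original cell) converts it before doing arithmetic:
revenue = float(input("What is the revenue of your department? "))
# revenue is now a float, so numeric operations work as expected.
print("Revenue with a 10% increase:", revenue * 1.1)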
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450316.ipynb
null
null
[{"Id": 129450316, "ScriptId": 38490477, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13282439, "CreationDate": "05/13/2023 23:52:19", "VersionNumber": 1.0, "Title": "Python Basics", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 14.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
77
0
77
77
129450360
<jupyter_start><jupyter_text>Vancouver Home Price Analysis - Regression Kaggle dataset identifier: vancouver-home-price-analysis-regression <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv( "/kaggle/input/vancouver-home-price-analysis-regression/House sale data Vancouver.csv" ) # df= pd.read_csv('./House sale data Vancouver.csv') df.head(5) df.shape df.dtypes df.describe().T # * The dataset seems has no null or empty value. # # **0. Prepare Data** # Drop "Number" and "Year Built" columns because they are not helpful for the dataset df = df.loc[:, ~df.columns.isin(["Number", "Year Built"])].copy() df.head(5) # Check whether there are duplicates df.loc[df.duplicated()] # * Glad to see that the dataset has no duplicated rows # Assume that there are no significant differences in house prices on the same street. # remove the street number from Address # create a new column named Street then remove the Address column street = [] for i in range(len(df["Address"])): street.append(df["Address"][i].split(" ", 1)[1]) df["Street"] = street df.drop(["Address"], axis=1, inplace=True) df.head(5) # Prices vary by month and year. We need to figure out the date of deal. # Only keep the digits of month and year for List Date df["List Date"] = pd.to_datetime(df["List Date"], yearfirst=True) dealdate = [] for i in range(len(df["List Date"])): dealdate.append(df["List Date"][i] + pd.Timedelta(days=df["Days on market"][i])) df["Deal Date"] = dealdate df.drop(["List Date", "Days on market"], axis=1, inplace=True) df["Deal Date"] = df["Deal Date"].dt.to_period("M") df.head(5) # # **I. EDA** # ## I.1. Univariate # ### *I.1.1. Price* df["Price"].describe() fig, axs = plt.subplots(ncols=2, figsize=(10, 4)) sns.histplot(data=df, x="Price", kde=True, bins=30, ax=axs[0]) axs[0].set_xlabel("House Price in Million") sns.boxplot(x=df["Price"], ax=axs[1]) axs[1].set_xlabel("House Price in Million") plt.show() # ### *I.1.2. Deal Date* df.groupby(by=["Deal Date"]).count()["Price"].describe() fig, axs = plt.subplots(ncols=2, figsize=(12, 4)) sns.barplot( data=df.groupby(by=["Deal Date"]).count(), y=df.groupby(by=["Deal Date"]).count().index, x=df.groupby(by=["Deal Date"]).count()["Price"], ax=axs[0], ) axs[0].set_xlabel("Count") sns.boxplot( data=df.groupby(by=["Deal Date"]).count(), y=df.groupby(by=["Deal Date"]).count()["Price"], ax=axs[1], ) axs[1].set_xlabel("Count") plt.show() # ### *I.1.3. Street* df["Street"].value_counts().head(20) ax = df["Street"].value_counts().head(20).plot(kind="barh", title="Top 20 Street") ax.set_xlabel("Count") ax.set_ylabel("Street Name") plt.show() # ### *I.1.4. 
Age* df["Age"].value_counts().head(20) ax = df["Age"].value_counts().head(20).plot(kind="barh", title="Top 20 Age") ax.set_xlabel("Count") ax.set_ylabel("House Age") plt.show() # * It seems that new houses and century-old houses are the most popular. # ### *I.1.5 Lot Size* df["Lot Size"].describe() fig, axs = plt.subplots(ncols=2, figsize=(12, 4)) sns.histplot(data=df, x="Lot Size", kde=True, bins=100, ax=axs[0]) axs[0].set_xlabel("Lot Size") sns.boxplot(data=df, x="Lot Size", ax=axs[1]) axs[1].set_xlabel("Lot Size") plt.show() # * Houses with a lot size of around four thousand are the most common. # * Houses with a lot size above 8,000 are rare. # ### *I.1.6 Total Floor Area* df["Total floor area"].describe() fig, axs = plt.subplots(ncols=2, figsize=(12, 4)) sns.histplot(data=df, x="Total floor area", kde=True, bins=100, ax=axs[0]) axs[0].set_xlabel("Total floor area") sns.boxplot(data=df, x="Total floor area", ax=axs[1]) plt.show() # * Houses with a total floor area below 700 or above 4,000 are rare. # ## I.2 Multivariate df.dtypes # * *Deal Date* is ordinal while *Street* is nominal. # * Most houses have a Lot Size below 10,000 and a Total floor area below 5,000. # * The other features are fairly evenly distributed. # ### *I.2.1. Encode categorical features as an integer array* # Ordinal data is encoded with pd.factorize codes, uniques = pd.factorize(df["Deal Date"], sort=True) df["Deal Date"] = codes from sklearn import preprocessing le = preprocessing.LabelEncoder() le.fit(df["Street"]) df["Street"] = le.transform(df["Street"]) sns.pairplot( df, kind="hist", vars=["Price", "Total floor area", "Lot Size", "Age", "Street"] ) plt.show() # ### *I.2.2. Correlations* sns.heatmap( df[["Price", "Total floor area", "Age", "Lot Size", "Deal Date", "Street"]].corr(), annot=True, fmt=".2f", ) plt.show()
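# The deal-date loop in the data-preparation section of this notebook can be written without iterating row by row; the lines below are an equivalent vectorized sketch (they assume "List Date" and "Days on market" are still present, i.e. they would replace the loop rather than run at this point in the notebook):
deal_date = pd.to_datetime(df["List Date"], yearfirst=True) + pd.to_timedelta(
    df["Days on market"], unit="D"
)
df["Deal Date"] = deal_date.dt.to_period("M")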
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450360.ipynb
vancouver-home-price-analysis-regression
darianghorbanian
[{"Id": 129450360, "ScriptId": 38490589, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15061325, "CreationDate": "05/13/2023 23:53:00", "VersionNumber": 1.0, "Title": "notebook4663025bfb", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 201.0, "LinesInsertedFromPrevious": 201.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185511974, "KernelVersionId": 129450360, "SourceDatasetVersionId": 4788889}]
[{"Id": 4788889, "DatasetId": 2772332, "DatasourceVersionId": 4852270, "CreatorUserId": 11813352, "LicenseName": "Unknown", "CreationDate": "12/30/2022 09:34:46", "VersionNumber": 1.0, "Title": "Vancouver Home Price Analysis - Regression", "Slug": "vancouver-home-price-analysis-regression", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2772332, "CreatorUserId": 11813352, "OwnerUserId": 11813352.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4788889.0, "CurrentDatasourceVersionId": 4852270.0, "ForumId": 2806261, "Type": 2, "CreationDate": "12/30/2022 09:34:46", "LastActivityDate": "12/30/2022", "TotalViews": 564, "TotalDownloads": 77, "TotalVotes": 5, "TotalKernels": 1}]
[{"Id": 11813352, "UserName": "darianghorbanian", "DisplayName": "Darian Ghorbanian", "RegisterDate": "10/04/2022", "PerformanceTier": 0}]
false
1
1,753
0
1,788
1,753
129450016
# Making this public for application to Atlas Fellowship # **Note:** You cannot run this code on Kaggle as you need an Nvidia A40 GPU. Either reduce batch size/image size or use local hardware/cloud computing other than Kaggle. import timm timm.__version__ # # Imports import pandas as pd import timm from torch import nn import torch import torchaudio as ta from torch.cuda.amp import autocast import random from torch.nn import functional as F from torch.distributions import Beta from torch.nn.parameter import Parameter from torch.utils.data import Dataset import numpy as np import librosa import ast import multiprocessing as mp import os from types import SimpleNamespace import numpy as np import numpy as np import pandas as pd import importlib import sys import random from tqdm import tqdm import gc import argparse import torch from torch import optim from torch.cuda.amp import GradScaler, autocast from collections import defaultdict import cv2 from copy import copy import os import transformers from transformers import get_cosine_schedule_with_warmup from torch.utils.data import SequentialSampler, DataLoader import glob import audioread from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold def set_seed(seed=1234): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = False torch.backends.cudnn.benchmark = True # # RUN THIS PART TO GENERATE AUDIO DURATIONS # audio_paths = glob.glob("/mnt/d/BirdCLEF22/train_short_audio/*/*.ogg") # len(audio_paths) # durations = [] # trunc_audio_paths = [] # for aud in audio_paths: # trunc_audio_paths.append(aud.split("/")[-2] + "/" + aud.split("/")[-1]) # trunc_audio_paths # for path in tqdm(audio_paths): # with audioread.audio_open(path) as f: # durations.append(f.duration) # df = pd.read_csv("./train_metadata.csv") # df_21 = pd.read_csv("./21_train_metadata.csv") # audio_paths = pd.Series(trunc_audio_paths, name='filename') # durations = pd.Series(durations, name='duration') # durations_df = pd.concat([audio_paths, durations], axis=1) # durations_df.head() # durations_df.to_csv("21_durations_df.csv", index=False) # glob.glob("/mnt/d/BirdCLEF22") # df_21['filename'] = df_21['primary_label'] + df_21['slashes'] + df_21['filename'] # df_21['slashes'] = '/' # df_21 # df_21.to_csv("21_train_metadata.csv", index=False) # durations_df # df_22 = pd.read_csv("train_metadata.csv") # df_21_22 = df_21[df_22.columns] # df_21_22 = pd.concat([df_22, df_21_22]) # df_21_22.primary_label.unique() # df_21_22.to_csv("21+22_train_metadata.csv", index=False) # df_21['filename'] = df_21['filename'].apply(lambda x: 'train_short_audio/'+x) # df_22['filename'] = df_22['filename'].apply(lambda x: 'train_audio/'+x) # df_22.to_csv("train_metadata.csv", index=False) # durations_22 = pd.read_csv("durations_df.csv") # durations_21 = pd.read_csv("21_durations_df.csv") # durations_21['filename'] = durations_21['filename'].apply(lambda x: 'train_short_audio/'+x) # durations_22['filename'] = durations_22['filename'].apply(lambda x: 'train_audio/'+x) # durations_22.to_csv("durations_df.csv", index=False) # durations_21.to_csv("21_durations_df.csv", index=False) # durations_21_22 = pd.concat([durations_21, durations_22]) # durations_21_22.to_csv("21+22_durations_df.csv", index=False) # # Config cfg = SimpleNamespace() # paths cfg.data_folder = "" cfg.name = "ari" # cfg.data_dir = "/mnt/d/BirdCLEF22/" cfg.data_dir = "./BirdCLEF22/" 
cfg.train_data_folder = cfg.data_dir cfg.val_data_folder = cfg.data_dir cfg.output_dir = "model" # dataset cfg.dataset = "base_ds" cfg.min_rating = 0 cfg.val_df = None cfg.batch_size_val = 1 cfg.train_aug = None cfg.val_aug = None cfg.test_augs = None cfg.wav_len_val = 5 # seconds # audio cfg.window_size = 2048 cfg.hop_size = 512 cfg.sample_rate = 32000 cfg.fmin = 16 cfg.fmax = 16386 cfg.power = 2 cfg.mel_bins = 256 cfg.top_db = 80.0 # img model cfg.pretrained = True cfg.pretrained_weights = None cfg.train = True cfg.val = False cfg.in_chans = 1 cfg.alpha = 1 cfg.eval_epochs = 1 cfg.eval_train_epochs = 1 cfg.warmup = 0 cfg.mel_norm = False cfg.label_smoothing = 0 cfg.remove_pretrained = [] # training cfg.seed = 123 cfg.save_val_data = True # ressources cfg.mixed_precision = True cfg.gpu = 0 # cfg.num_workers = mp.cpu_count() # 18 cfg.num_workers = 8 cfg.drop_last = True cfg.mixup2 = 0 cfg.label_smoothing = 0 cfg.mixup_2x = False cfg.birds = np.array( [ "afrsil1", "akekee", "akepa1", "akiapo", "akikik", "amewig", "aniani", "apapan", "arcter", "barpet", "bcnher", "belkin1", "bkbplo", "bknsti", "bkwpet", "blkfra", "blknod", "bongul", "brant", "brnboo", "brnnod", "brnowl", "brtcur", "bubsan", "buffle", "bulpet", "burpar", "buwtea", "cacgoo1", "calqua", "cangoo", "canvas", "caster1", "categr", "chbsan", "chemun", "chukar", "cintea", "comgal1", "commyn", "compea", "comsan", "comwax", "coopet", "crehon", "dunlin", "elepai", "ercfra", "eurwig", "fragul", "gadwal", "gamqua", "glwgul", "gnwtea", "golphe", "grbher3", "grefri", "gresca", "gryfra", "gwfgoo", "hawama", "hawcoo", "hawcre", "hawgoo", "hawhaw", "hawpet1", "hoomer", "houfin", "houspa", "hudgod", "iiwi", "incter1", "jabwar", "japqua", "kalphe", "kauama", "laugul", "layalb", "lcspet", "leasan", "leater1", "lessca", "lesyel", "lobdow", "lotjae", "madpet", "magpet1", "mallar3", "masboo", "mauala", "maupar", "merlin", "mitpar", "moudov", "norcar", "norhar2", "normoc", "norpin", "norsho", "nutman", "oahama", "omao", "osprey", "pagplo", "palila", "parjae", "pecsan", "peflov", "perfal", "pibgre", "pomjae", "puaioh", "reccar", "redava", "redjun", "redpha1", "refboo", "rempar", "rettro", "ribgul", "rinduc", "rinphe", "rocpig", "rorpar", "rudtur", "ruff", "saffin", "sander", "semplo", "sheowl", "shtsan", "skylar", "snogoo", "sooshe", "sooter1", "sopsku1", "sora", "spodov", "sposan", "towsol", "wantat1", "warwhe1", "wesmea", "wessan", "wetshe", "whfibi", "whiter", "whttro", "wiltur", "yebcar", "yefcan", "zebdov", "acafly", "acowoo", "aldfly", "ameavo", "amecro", "amegfi", "amekes", "amepip", "amered", "amerob", "amtspa", "andsol1", "annhum", "astfly", "azaspi1", "babwar", "baleag", "balori", "banana", "banswa", "banwre1", "barant1", "barswa", "batpig1", "bawswa1", "bawwar", "baywre1", "bbwduc", "belvir", "bewwre", "bkbmag1", "bkbwar", "bkcchi", "bkhgro", "bkmtou1", "blbgra1", "blbthr1", "blcjay1", "blctan1", "blhpar1", "blkpho", "blsspa1", "blugrb1", "blujay", "bncfly", "bnhcow", "bobfly1", "botgra", "brbmot1", "brbsol1", "brcvir1", "brebla", "brncre", "brnjay", "brnthr", "brratt1", "brwhaw", "brwpar1", "btbwar", "btnwar", "btywar", "bucmot2", "buggna", "bugtan", "buhvir", "bulori", "burwar1", "bushti", "butsal1", "cacwre", "caltow", "canwar", "carchi", "carwre", "casfin", "caskin", "casvir", "ccbfin", "cedwax", "chbant1", "chbchi", "chbwre1", "chcant2", "chispa", "chswar", "cinfly2", "clanut", "clcrob", "cliswa", "cobtan1", "cocwoo1", "cogdov", "colcha1", "coltro1", "comgol", "comgra", "comloo", "commer", "compau", "compot1", "comrav", "comyel", 
"coohaw", "cotfly1", "cowscj1", "cregua1", "creoro1", "crfpar", "cubthr", "daejun", "dowwoo", "ducfly", "dusfly", "easblu", "easkin", "easmea", "easpho", "eastow", "eawpew", "eletro", "eucdov", "eursta", "fepowl", "fiespa", "flrtan1", "foxspa", "gartro1", "gbbgul", "gbwwre1", "gcrwar", "gilwoo", "gnttow", "gocfly1", "gockin", "gocspa", "goftyr1", "gohque1", "goowoo1", "grasal1", "grbani", "grcfly", "greegr", "grekis", "grepew", "grethr1", "gretin1", "greyel", "grhcha1", "grhowl", "grnher", "grnjay", "grtgra", "grycat", "gryhaw2", "haiwoo", "heptan", "hergul", "herthr", "herwar", "higmot1", "hofwoo1", "houwre", "hutvir", "incdov", "indbun", "kebtou1", "killde", "labwoo", "larspa", "laufal1", "lazbun", "leafly", "lesgol", "lesgre1", "lesvio1", "linspa", "linwoo1", "littin1", "lobgna5", "logshr", "lotduc", "lotman1", "lucwar", "macwar", "magwar", "marwre", "mastro1", "meapar", "melbla1", "monoro1", "mouchi", "mouela1", "mouqua", "mouwar", "mutswa", "naswar", "norfli", "norpar", "norwat", "nrwswa", "nutwoo", "oaktit", "obnthr1", "ocbfly1", "oliwoo1", "olsfly", "orbeup1", "orbspa1", "orcpar", "orcwar", "orfpar", "ovenbi1", "pabspi1", "paltan1", "palwar", "pasfly", "pavpig2", "phivir", "pilwoo", "pinsis", "pirfly1", "plawre1", "plaxen1", "plsvir", "plupig2", "prowar", "purfin", "purgal2", "putfru1", "pygnut", "rawwre1", "rcatan1", "rebnut", "rebsap", "rebwoo", "redcro", "reevir1", "rehbar1", "relpar", "reshaw", "rethaw", "rewbla", "rinkin1", "roahaw", "robgro", "rocpig1", "rotbec", "royter1", "rthhum", "rtlhum", "ruboro1", "rubpep1", "rubrob", "rubwre1", "ruckin", "rucspa1", "rucwar", "rucwar1", "rudpig", "rufhum", "rugdov", "rumfly1", "runwre1", "rutjac1", "sancra", "savspa", "saypho", "scamac1", "scatan", "scbwre1", "scptyr1", "scrtan1", "shicow", "sibtan2", "sinwre1", "sltred", "smbani", "sobtyr1", "socfly1", "solsan", "sonspa", "soulap1", "spotow", "spvear1", "squcuc1", "stbori", "stejay", "sthant1", "sthwoo1", "strcuc1", "strfly1", "strsal1", "stvhum2", "subfly", "sumtan", "swaspa", "swathr", "tenwar", "thbeup1", "thbkin", "thswar1", "treswa", "trogna1", "trokin", "tromoc", "tropar", "tropew1", "tuftit", "tunswa", "veery", "verdin", "vigswa", "warvir", "wbwwre1", "webwoo1", "wegspa1", "wesant1", "wesblu", "weskin", "westan", "wewpew", "whbman1", "whbnut", "whcpar", "whcsee1", "whcspa", "whevir", "whfpar1", "whimbr", "whiwre1", "whtdov", "whtspa", "whwbec1", "whwdov", "wilfly", "willet1", "wilsni1", "wlswar", "wooduc", "woothr", "wrenti", "y00475", "yebcha", "yebela1", "yebfly", "yebori1", "yebsap", "yebsee1", "yefgra1", "yegvir", "yehbla", "yehcar1", "yelgro", "yelwar", "yeofly1", "yerwar", "yeteup1", "yetvir", ] ) cfg.n_classes = len(cfg.birds) # dataset cfg.min_rating = 2.0 cfg.wav_crop_len = 30 # seconds # cfg.lr = 0.0001 cfg.lr = 1e-3 cfg.weight_decay = 1e-6 cfg.epochs = 20 cfg.batch_size = 16 cfg.batch_size_val = 16 # cfg.backbone = "resnet34" cfg.backbone = "eca_nfnet_l0" cfg.fold = 0 cfg.save_val_data = False cfg.mixed_precision = True cfg.mixup = True cfg.mix_beta = 1 cfg.train_df1 = "21+22_train_metadata.csv" cfg.train_df2 = "./21+22_durations_df.csv" cfg.device = "cuda" if torch.cuda.is_available() else "cpu" cfg.tr_collate_fn = None cfg.val_collate_fn = None cfg.val = True cfg.dev = False cfg df = pd.read_csv("21+22_train_metadata.csv") ddf = pd.read_csv("21+22_durations_df.csv") # Durations csv generated above merge = pd.merge(df, ddf, on="filename") merge # uniques = [] # for label in train_df['secondary_labels'].unique(): # label = ast.literal_eval(label) # for l in label: # 
if not (l in(uniques)): # uniques.append(l) # len(uniques) # for u in uniques: # if not (u in np.intersect1d(cfg.birds, uniques)): # print(u) # len(np.intersect1d(cfg.birds, uniques)) # # Dataset def batch_to_device(batch, device): batch_dict = {key: batch[key].to(device) for key in batch} return batch_dict class CustomDataset(Dataset): def __init__(self, df, cfg, aug, mode="train"): self.cfg = cfg self.mode = mode self.df = df.copy() self.bird2id = {bird: idx for idx, bird in enumerate(cfg.birds)} if self.mode == "train": self.data_folder = cfg.train_data_folder self.df = self.df[self.df["rating"] >= self.cfg.min_rating] elif self.mode == "val": self.data_folder = cfg.val_data_folder elif self.mode == "test": self.data_folder = cfg.test_data_folder self.fns = self.df["filename"].unique() self.df = self.setup_df() self.aug_audio = cfg.train_aug def setup_df(self): df = self.df.copy() if self.mode == "train": df["weight"] = np.clip(df["rating"] / df["rating"].max(), 0.1, 1.0) df["target"] = df["primary_label"].apply(self.bird2id.get) labels = np.eye(self.cfg.n_classes)[df["target"].astype(int).values] label2 = ( df["secondary_labels"].apply(lambda x: self.secondary2target(x)).values ) for i, t in enumerate(label2): try: labels[i, t] = 1 except: print(df) else: targets = df["birds"].apply(lambda x: self.birds2target(x)).values labels = np.zeros((df.shape[0], self.cfg.n_classes)) for i, t in enumerate(targets): labels[i, t] = 1 df[[f"t{i}" for i in range(self.cfg.n_classes)]] = labels if self.mode != "train": df = df.groupby("filename") return df def __getitem__(self, idx): if self.mode == "train": row = self.df.iloc[idx] fn = row["filename"] label = row[[f"t{i}" for i in range(self.cfg.n_classes)]].values weight = row["weight"] # fold = row["fold"] fold = -1 # wav_len = row["length"] parts = 1 else: fn = self.fns[idx] row = self.df.get_group(fn) label = row[[f"t{i}" for i in range(self.cfg.n_classes)]].values wav_len = None parts = label.shape[0] fold = -1 weight = 1 if self.mode == "train": # wav_len_sec = wav_len / self.cfg.sample_rate wav_len_sec = row["duration"] duration = self.cfg.wav_crop_len max_offset = wav_len_sec - duration max_offset = max(max_offset, 1) offset = np.random.randint(max_offset) else: offset = 0.0 duration = None wav = self.load_one(fn, offset, duration) if wav.shape[0] < (self.cfg.wav_crop_len * self.cfg.sample_rate): pad = self.cfg.wav_crop_len * self.cfg.sample_rate - wav.shape[0] wav = np.pad(wav, (0, pad)) if self.mode == "train": if self.aug_audio: wav = self.aug_audio(samples=wav, sample_rate=self.cfg.sample_rate) else: if self.cfg.val_aug: wav = self.cfg.val_aug(samples=wav, sample_rate=self.cfg.sample_rate) wav_tensor = torch.tensor(wav) # (n_samples) if parts > 1: n_samples = wav_tensor.shape[0] wav_tensor = wav_tensor[: n_samples // parts * parts].reshape( parts, n_samples // parts ) feature_dict = { "input": wav_tensor, "target": torch.tensor(label.astype(np.float32)), "weight": torch.tensor(weight), "fold": torch.tensor(fold), } return feature_dict def __len__(self): if cfg.dev: return 256 return len(self.fns) def load_one(self, id_, offset, duration): fp = self.data_folder + id_ try: wav, sr = librosa.load(fp, sr=None, offset=offset, duration=duration) except: print("FAIL READING rec", fp) return wav def birds2target(self, birds): birds = birds.split() target = [self.bird2id.get(item) for item in birds if not item == "nocall"] return target def secondary2target(self, secondary_label): birds = ast.literal_eval(secondary_label) target = 
[self.bird2id.get(item) for item in birds if not item == "nocall"] return target # # Dataset related utils def worker_init_fn(worker_id): np.random.seed(np.random.get_state()[1][0] + worker_id) def get_train_dataloader(train_ds, cfg): train_dataloader = DataLoader( train_ds, sampler=None, shuffle=True, batch_size=cfg.batch_size, num_workers=cfg.num_workers, pin_memory=False, collate_fn=cfg.tr_collate_fn, drop_last=cfg.drop_last, worker_init_fn=worker_init_fn, ) print(f"train: dataset {len(train_ds)}, dataloader {len(train_dataloader)}") return train_dataloader def get_val_dataloader(val_ds, cfg): val_dataloader = DataLoader( val_ds, sampler=None, shuffle=True, batch_size=cfg.batch_size, num_workers=cfg.num_workers, pin_memory=False, collate_fn=cfg.val_collate_fn, drop_last=cfg.drop_last, worker_init_fn=worker_init_fn, ) print(f"val: dataset {len(val_ds)}, dataloader {len(val_dataloader)}") return val_dataloader def get_scheduler(cfg, optimizer, total_steps): scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=cfg.warmup * (total_steps // cfg.batch_size), num_training_steps=cfg.epochs * (total_steps // cfg.batch_size), ) return scheduler def load_df(cfg): train_df1 = pd.read_csv(cfg.train_df1) train_df2 = pd.read_csv(cfg.train_df2) # train_df = pd.merge(train_df1[['primary_label', 'secondary_labels', 'rating', 'filename']], train_df2[['filename', 'duration']], how='inner', on='filename') train_df = pd.merge(train_df1, train_df2, on="filename") return train_df # # GeM and Mix-up def gem(x, p=3, eps=1e-6): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1.0 / p) class GeM(nn.Module): # Generalized mean: https://arxiv.org/abs/1711.02512 def __init__(self, p=3, eps=1e-6): super(GeM, self).__init__() self.p = Parameter(torch.ones(1) * p) self.eps = eps def forward(self, x): ret = gem(x, p=self.p, eps=self.eps) return ret def __repr__(self): return ( self.__class__.__name__ + "(p=" + "{:.4f}".format(self.p.data.tolist()[0]) + ", eps=" + str(self.eps) + ")" ) class Mixup(nn.Module): def __init__(self, mix_beta): super(Mixup, self).__init__() self.beta_distribution = Beta(mix_beta, mix_beta) def forward(self, X, Y, weight=None): bs = X.shape[0] n_dims = len(X.shape) perm = torch.randperm(bs) coeffs = self.beta_distribution.rsample(torch.Size((bs,))).to(X.device) if n_dims == 2: X = coeffs.view(-1, 1) * X + (1 - coeffs.view(-1, 1)) * X[perm] elif n_dims == 3: X = coeffs.view(-1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1)) * X[perm] else: X = coeffs.view(-1, 1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1, 1)) * X[perm] Y = coeffs.view(-1, 1) * Y + (1 - coeffs.view(-1, 1)) * Y[perm] if weight is None: return X, Y else: weight = coeffs.view(-1) * weight + (1 - coeffs.view(-1)) * weight[perm] return X, Y, weight class CutMix(nn.Module): def __init__(self, mix_beta): super(CutMix, self).__init__() self.beta_distribution = Beta(mix_beta, mix_beta) def forward(self, X, Y, weight=None): bs = X.shape[0] n_dims = len(X.shape) perm = torch.randperm(bs) coeffs = self.beta_distribution.rsample(torch.Size((bs,))).to(X.device) if n_dims == 2: X = coeffs.view(-1, 1) * X + (1 - coeffs.view(-1, 1)) * X[perm] elif n_dims == 3: X = coeffs.view(-1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1)) * X[perm] else: X = coeffs.view(-1, 1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1, 1)) * X[perm] Y = coeffs.view(-1, 1) * Y + (1 - coeffs.view(-1, 1)) * Y[perm] if weight is None: return X, Y else: weight = coeffs.view(-1) * weight + (1 - coeffs.view(-1)) * weight[perm] return X, Y, weight def 
run_eval(model, val_dataloader, cfg, pre="val"): model.eval() torch.set_grad_enabled(False) val_data = defaultdict(list) for data in tqdm(val_dataloader): batch = batch_to_device(data, device) if cfg.mixed_precision: with autocast(): output = model(batch, val=True) else: output = model(batch, val=True) for key, val in output.items(): val_data[key] += [output[key]] for key, val in output.items(): value = val_data[key] if len(value[0].shape) == 0: val_data[key] = torch.stack(value) else: val_data[key] = torch.cat(value, dim=0) if cfg.save_val_data: torch.save(val_data, f"{cfg.output_dir}/{pre}_data_seed{cfg.seed}.pth") if "loss" in val_data: val_losses = val_data["loss"].cpu().numpy() val_loss = np.mean(val_losses) print(f"Mean {pre}_loss", np.mean(val_losses)) else: val_loss = 0.0 print("EVAL FINISHED") return val_loss # # Model class Net(nn.Module): def __init__(self, cfg): super(Net, self).__init__() self.cfg = cfg self.n_classes = cfg.n_classes self.mel_spec = ta.transforms.MelSpectrogram( sample_rate=cfg.sample_rate, n_fft=cfg.window_size, win_length=cfg.window_size, hop_length=cfg.hop_size, f_min=cfg.fmin, f_max=cfg.fmax, pad=0, n_mels=cfg.mel_bins, power=cfg.power, normalized=False, ) self.amplitude_to_db = ta.transforms.AmplitudeToDB(top_db=cfg.top_db) self.wav2img = torch.nn.Sequential(self.mel_spec, self.amplitude_to_db) self.backbone = timm.create_model( cfg.backbone, pretrained=cfg.pretrained, num_classes=0, global_pool="", in_chans=cfg.in_chans, ) if "efficientnet" in cfg.backbone: backbone_out = self.backbone.num_features else: backbone_out = self.backbone.feature_info[-1]["num_chs"] self.global_pool = GeM() self.head = nn.Linear(backbone_out, self.n_classes) if cfg.pretrained_weights is not None: sd = torch.load(cfg.pretrained_weights, map_location="cpu")["model"] sd = {k.replace("module.", ""): v for k, v in sd.items()} self.load_state_dict(sd, strict=True) print("weights loaded from", cfg.pretrained_weights) self.loss_fn = nn.BCEWithLogitsLoss(reduction="none") self.mixup = Mixup(mix_beta=cfg.mix_beta) self.factor = int(cfg.wav_crop_len / 5.0) def forward(self, batch, val=False): if not self.training: if not val: x = batch["input"] bs, parts, time = x.shape x = x.reshape(parts, time) y = batch["target"] y = y[0] else: x = batch["input"] y = batch["target"] bs, time = x.shape x = x.reshape(bs * self.factor, time // self.factor) else: x = batch["input"] y = batch["target"] bs, time = x.shape x = x.reshape(bs * self.factor, time // self.factor) with autocast(enabled=False): x = self.wav2img(x) # (bs, mel, time) if self.cfg.mel_norm: x = (x + 80) / 80 x = x.permute(0, 2, 1) x = x[:, None, :, :] weight = batch["weight"] if self.training or val: b, c, t, f = x.shape x = x.permute(0, 2, 1, 3) x = x.reshape(b // self.factor, self.factor * t, c, f) if self.cfg.mixup: x, y, weight = self.mixup(x, y, weight) # x, y = self.mixup(x, y) if self.cfg.mixup2: x, y, weight = self.mixup(x, y, weight) x = x.reshape(b, t, c, f) x = x.permute(0, 2, 1, 3) x = self.backbone(x) if self.training or val: b, c, t, f = x.shape x = x.permute(0, 2, 1, 3) x = x.reshape(b // self.factor, self.factor * t, c, f) x = x.permute(0, 2, 1, 3) x = self.global_pool(x) x = x[:, :, 0, 0] logits = self.head(x) loss = self.loss_fn(logits, y) loss = (loss.mean(dim=1) * weight) / weight.sum() loss = loss.sum() return { "loss": loss, "logits": logits.sigmoid(), "logits_raw": logits, "target": y, } def create_checkpoint(model, optimizer, epoch, scheduler=None, scaler=None): checkpoint = { "model": model.state_dict(), 
"optimizer": optimizer.state_dict(), "epoch": epoch, } if scheduler is not None: checkpoint["scheduler"] = scheduler.state_dict() if scaler is not None: checkpoint["scaler"] = scaler.state_dict() return checkpoint set_seed(cfg.seed) train_df = load_df(cfg) kf = StratifiedKFold(n_splits=5) for n, (trn_index, val_index) in enumerate( kf.split(train_df, train_df["primary_label"]) ): train_df.loc[val_index, "kfold"] = int(n) train_df["kfold"] = train_df["kfold"].astype(int) val_df = train_df[train_df["kfold"] == cfg.fold] val_dataset = CustomDataset(val_df, cfg, aug=cfg.val_aug, mode="train") val_dataloader = get_val_dataloader(val_dataset, cfg) train = train_df[train_df["kfold"] != cfg.fold] train_dataset = CustomDataset(train, cfg, aug=cfg.train_aug, mode="train") train_dataloader = get_train_dataloader(train_dataset, cfg) model = Net(cfg) model.to(cfg.device) total_steps = len(train_dataset) params = model.parameters() # optimizer = optim.Adam(params, lr=cfg.lr, weight_decay=0) optimizer = transformers.AdamW(params, lr=cfg.lr, weight_decay=cfg.weight_decay) scheduler = get_scheduler(cfg, optimizer, total_steps) device = cfg.device try: os.makedirs(cfg.output_dir) except: pass if cfg.mixed_precision: scaler = GradScaler() else: scaler = None cfg.curr_step = 0 i = 0 best_val_loss = np.inf optimizer.zero_grad() for epoch in range(cfg.epochs): set_seed(cfg.seed + epoch) cfg.curr_epoch = epoch print("EPOCH:", epoch) progress_bar = tqdm(range(len(train_dataloader))) tr_it = iter(train_dataloader) losses = [] gc.collect() if cfg.train: # ==== TRAIN LOOP for itr in progress_bar: i += 1 cfg.curr_step += cfg.batch_size data = next(tr_it) model.train() torch.set_grad_enabled(True) batch = batch_to_device(data, device) if cfg.mixed_precision: with autocast(): output_dict = model(batch) else: output_dict = model(batch) loss = output_dict["loss"] losses.append(loss.item()) if cfg.mixed_precision: scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() optimizer.zero_grad() else: loss.backward() optimizer.step() optimizer.zero_grad() if scheduler is not None: scheduler.step() if cfg.curr_step % cfg.batch_size == 0: progress_bar.set_description(f"loss: {np.mean(losses[-10:]):.4f}") if cfg.val: if (epoch + 1) % cfg.eval_epochs == 0 or (epoch + 1) == cfg.epochs: val_loss = run_eval(model, val_dataloader, cfg) else: val_score = 0 if cfg.epochs > 0: checkpoint = create_checkpoint( model, optimizer, epoch, scheduler=scheduler, scaler=scaler ) torch.save( checkpoint, f"{cfg.output_dir}/checkpoint_last_seed{cfg.seed}_{epoch}.pth" ) if cfg.epochs > 0: checkpoint = create_checkpoint( model, optimizer, epoch, scheduler=scheduler, scaler=scaler ) torch.save(checkpoint, f"{cfg.output_dir}/checkpoint_last_seed{cfg.seed}.pth")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450016.ipynb
[{"Id": 129450016, "ScriptId": 38490460, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7211003, "CreationDate": "05/13/2023 23:45:33", "VersionNumber": 1.0, "Title": "BirdCLEF22 ECANFNetL0 Training Code", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 916.0, "LinesInsertedFromPrevious": 916.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
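# # Inference sketch
# The training loop above saves checkpoints but the notebook stops before inference. Below is a
# minimal, hypothetical inference sketch, not part of the original pipeline: it assumes the cells
# above have been run so that `cfg`, `Net`, and a saved checkpoint exist, and it scores a single
# 30-second window the same way the validation path does. The checkpoint file name mirrors the
# save call in the training loop; everything else is illustrative.
infer_ckpt = f"{cfg.output_dir}/checkpoint_last_seed{cfg.seed}.pth"
infer_model = Net(cfg)
infer_model.load_state_dict(torch.load(infer_ckpt, map_location="cpu")["model"])
infer_model.to(cfg.device).eval()


def predict_clip(path, start_sec=0.0):
    """Return a {species: probability} dict for one wav_crop_len-second window of an audio file."""
    wav, _ = librosa.load(path, sr=cfg.sample_rate, offset=start_sec, duration=cfg.wav_crop_len)
    target_len = cfg.wav_crop_len * cfg.sample_rate
    if wav.shape[0] < target_len:  # pad short clips, as CustomDataset does
        wav = np.pad(wav, (0, target_len - wav.shape[0]))
    batch = {
        "input": torch.tensor(wav, dtype=torch.float32)[None].to(cfg.device),
        "target": torch.zeros(1, cfg.n_classes, device=cfg.device),
        "weight": torch.ones(1, device=cfg.device),
    }
    with torch.no_grad():
        # val=True keeps the model's internal 5-second chunking; the returned "logits" key
        # already holds sigmoid probabilities, per the model's return dict above
        out = infer_model(batch, val=True)
    return dict(zip(cfg.birds, out["logits"].mean(0).cpu().numpy()))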
# ### Fastai Tabular Application
# For part 2 I thought I would try the tabular application in the fastai documentation.
from fastai.tabular.all import *
from fastai.imports import *
import os
from pathlib import Path
import torch, numpy as np, pandas as pd

# ### Exploratory Data Analysis
path = Path("../input/titanic")
df_train = pd.read_csv(path / "train.csv")
df_test = pd.read_csv(path / "test.csv")
print("|Training set|")
print(df_train.isna().sum())
print("|Testing set|")
df_test.isna().sum()
# Let's compare our Titanic dataset with the adult dataset in the tutorial. We also have continuous columns like Age and Fare, which we can feed directly into our model. As for the categorical ones, like Pclass, Sex and Cabin, we'll convert them into unique indices for the embedding layers.
# The coolest part about TabularDataLoaders is that it fills in missing data and normalizes continuous variables for us.
dls = TabularDataLoaders.from_csv(
    path / "train.csv",
    path=path,
    y_names="Survived",
    cat_names=["Pclass", "Sex", "Embarked", "Cabin"],
    cont_names=["Age", "Fare"],
    procs=[Categorify, FillMissing, Normalize],
)
splits = RandomSplitter(valid_pct=0.2)(range_of(df_train))
to = TabularPandas(
    df_train,
    procs=[Categorify, FillMissing, Normalize],
    cat_names=["Pclass", "Sex", "Embarked", "Cabin"],
    cont_names=["Age", "Fare"],
    y_names="Survived",
    splits=splits,
)
dls = to.dataloaders(bs=24)
to.xs.iloc[:2]
dls.show_batch()
learn = tabular_learner(dls, metrics=accuracy)
learn.fit_one_cycle(1)
learn.show_results()
# Although we were able to train a model in under 10 minutes, the accuracy isn't great. Perhaps addressing the missing values with feature engineering techniques will help. I'll be following the Advanced Feature Engineering Tutorial notebook to clean our data.
# ### Missing Data
import matplotlib.pyplot as plt  # `import matplotlib as plt` would not expose the plotting API
import seaborn as sns

sns.set(style="darkgrid")
# ### Age: imputation based on correlation scores
# Before imputation, I want to see which feature is the best predictor of survival rate.
from sklearn.linear_model import LogisticRegression

df_all = pd.concat([df_train, df_test], sort=True).reset_index(drop=True)
# drop columns not needed for this analysis
df_all = df_all.drop(["PassengerId", "Name", "Ticket", "Embarked"], axis=1)
# convert categorical variables to numerical
df_all = pd.get_dummies(df_all, columns=["Sex"], drop_first=True)
df_all.dtypes  # dtypes is a property, not a callable
# Fit the logistic regression on labelled training rows only, without the non-numeric Cabin
# column and without rows containing missing values (LogisticRegression handles neither).
labelled = df_all.dropna(subset=["Survived", "Age"]).drop(columns=["Cabin"])
X_train = labelled.drop("Survived", axis=1)
y_train = labelled["Survived"]
logreg = LogisticRegression(max_iter=10000)
logreg.fit(X_train, y_train)
df_all_corr = (
    df_all.corr(numeric_only=True)  # Cabin is a string column, so restrict to numeric features
    .abs()
    .unstack()
    .sort_values(kind="quicksort", ascending=False)
    .reset_index()
)
df_all_corr.rename(
    columns={
        "level_0": "Feature 1",
        "level_1": "Feature 2",
        0: "Correlation Coefficient",
    },
    inplace=True,
)
df_all_corr[df_all_corr["Feature 1"] == "Age"]
pclass_age = df_all.groupby("Pclass").Age.median()
df_all["Age"] = df_all.apply(
    lambda row: pclass_age[row["Pclass"]] if pd.isnull(row["Age"]) else row["Age"],
    axis=1,
)
df_all.isna().sum()
# ### Cabin
# I really like the approach of replacing Cabin with Deck. If you want to read the full detail, please refer to AFET.
df_all["Deck"] = df_all["Cabin"].apply(lambda s: s[0] if pd.notnull(s) else "M")
df_all = df_all.drop(columns=["Cabin"])
df_all.isna().sum()
# The TabularPandas below uses Deck as a categorical feature, so df_train needs it too.
df_train["Deck"] = df_train["Cabin"].apply(lambda s: s[0] if pd.notnull(s) else "M")
splits = RandomSplitter(valid_pct=0.2)(range_of(df_train))
to = TabularPandas(
    df_train,
    procs=[Categorify, FillMissing, Normalize],
    cat_names=["Pclass", "Sex", "Embarked", "Deck"],
    cont_names=["Age", "Fare"],
    y_names="Survived",
    splits=splits,
)
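# ### Submission
# A possible continuation, not in the original notebook: retrain a tabular learner on the cleaned
# features and write a Kaggle submission. It assumes `to`, `df_train`, and `df_test` from the cells
# above; the epoch count and the 0.5 threshold are arbitrary choices. df_test needs the same Deck
# feature, and its single missing Fare is filled manually because FillMissing only learned fill
# values for columns that had missing values in the training data.
df_test["Deck"] = df_test["Cabin"].apply(lambda s: s[0] if pd.notnull(s) else "M")
df_test["Fare"] = df_test["Fare"].fillna(df_train["Fare"].median())

dls = to.dataloaders(bs=24)
learn = tabular_learner(dls, metrics=accuracy)
learn.fit_one_cycle(5)

test_dl = learn.dls.test_dl(df_test)  # applies the Categorify/FillMissing/Normalize fitted on df_train
preds, _ = learn.get_preds(dl=test_dl)
# With a two-column (classification) output the last column is the survival probability;
# a one-column (regression) output can be thresholded directly.
surv = (preds[:, -1] > 0.5).long().numpy()
submission = pd.DataFrame({"PassengerId": df_test["PassengerId"], "Survived": surv})
submission.to_csv("submission.csv", index=False)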
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450093.ipynb
[{"Id": 129450093, "ScriptId": 38435965, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8109381, "CreationDate": "05/13/2023 23:47:00", "VersionNumber": 2.0, "Title": "Using Fastai Tabular Application (Titanic dataset)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 113.0, "LinesInsertedFromPrevious": 75.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 38.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
# # **Baseline Random Forest**
# Import necessary libraries
import pandas as pd
import numpy as np
from fastai.tabular.all import *
from sklearn.ensemble import RandomForestClassifier

data_folder = "../input/icr-identify-age-related-conditions/"
# Load the data into a pandas dataframe
train_df = pd.read_csv(f"{data_folder}train.csv")
test_df = pd.read_csv(f"{data_folder}test.csv")
# Check the size and shape of the data
print(f"Training data shape: {train_df.shape}")
print(f"Testing data shape: {test_df.shape}")
# Check the features of the data
print(f"Training data features: {train_df.columns}")
print(f"Testing data features: {test_df.columns}")
test_df.head()
train = train_df.drop(["Id"], axis=1)
test = test_df.drop(["Id"], axis=1)
# Fill numeric columns with the training median
train_numeric = train.select_dtypes(include=np.number)
test_numeric = test.select_dtypes(include=np.number)
train_numeric = train_numeric.fillna(train_numeric.median())
test_numeric = test_numeric.fillna(train_numeric.median())
# Fill non-numeric columns with the training mode
train_non_numeric = train.select_dtypes(exclude=np.number)
test_non_numeric = test.select_dtypes(exclude=np.number)
train_non_numeric = train_non_numeric.fillna(train_non_numeric.mode().iloc[0])
test_non_numeric = test_non_numeric.fillna(train_non_numeric.mode().iloc[0])
# Concatenate the numeric and non-numeric columns
train = pd.concat([train_numeric, train_non_numeric], axis=1)
test = pd.concat([test_numeric, test_non_numeric], axis=1)
test
dep_var = "Class"
procs = [Categorify, Normalize]
cont, cat = cont_cat_split(train, 1, dep_var=dep_var)
# Split the data into training and validation sets
splits = RandomSplitter(valid_pct=0.2, seed=42)(range_of(train))
# Create a TabularPandas object
to = TabularPandas(train, procs, cat, cont, y_names=dep_var, splits=splits)
# Create a data loader object
dls = to.dataloaders()
# Create a Random Forest classifier
rf_model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
# Train the model using the data from the dataloaders
rf_model.fit(dls.train_ds.xs, dls.train_ds.ys.values.ravel())
# Apply the pre-processing steps to the test data using the TabularPandas object
to_test = to.new(test)
to_test.process()
# # Make predictions using the trained random forest model
test_dl = dls.test_dl(to_test)
predictions = rf_model.predict_proba(test_dl.xs)
predictions
test_ids = test_df["Id"]
# Create a DataFrame with the predicted probabilities
submission_df = pd.DataFrame(predictions, columns=["class_0", "class_1"])
# Add the Id column to the submission DataFrame
submission_df.insert(0, "Id", test_ids)
submission_df
submission_df.to_csv("submission.csv", index=False)
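# # Validation check
# A quick validation check, not in the original notebook: the random forest was fit on the 80%
# training split, so the held-out 20% gives a rough estimate of how well its predicted
# probabilities generalize. It assumes the validation split exposes the same `.xs`/`.ys`
# accessors used above for the training split.
from sklearn.metrics import accuracy_score, log_loss

val_X = dls.valid_ds.xs
val_y = dls.valid_ds.ys.values.ravel()
val_proba = rf_model.predict_proba(val_X)

print(f"Validation accuracy: {accuracy_score(val_y, rf_model.predict(val_X)):.3f}")
print(f"Validation log loss: {log_loss(val_y, val_proba):.3f}")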
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450726.ipynb
[{"Id": 129450726, "ScriptId": 38415677, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12301645, "CreationDate": "05/14/2023 00:00:10", "VersionNumber": 5.0, "Title": "ICR: Baseline Random Forest", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 84.0, "LinesInsertedFromPrevious": 23.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 61.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
# # Project goal
# This project aims to analyze the purchasing habits of a store's customers.
# The original data for this project can be found at this [Link](https://github.com/andre-marcos-perez/ebac-course-utils); however, for this analysis the developer chose to use a reduced dataset, which can be found at this [Link](https://www.kaggle.com/datasets/giovanipedroso/exercicio-ebac-sql). This notebook only carries out the analysis of the data; the processing was done in AWS Athena, and the queries are listed in the sections where each analysis is performed.
# # Library imports
import pandas as pd
import seaborn as sns

# # Data loading
dados_ori = pd.read_csv(
    "/kaggle/input/exercicio-ebac-sql/Profissao Analista de dados M36 Exercicio credito.csv"
)
query_1 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_1.csv")
query_2 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_2.csv")
query_3 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_3.csv")
query_4 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_4.csv")
query_5 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_5.csv")
query_6 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_6.csv")
query_7 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_7.csv")
query_8 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_8.csv")
query_9 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_9.csv")
query_10 = pd.read_csv("/kaggle/input/exercicio-ebac-sql/query_10.csv")

# # Analysis
# ## Table creation
# ```sql
# CREATE EXTERNAL TABLE credito(
#   idade BIGINT,
#   sexo STRING,
#   dependentes BIGINT,
#   escolaridade STRING,
#   estado_civil STRING,
#   salario_anual STRING,
#   tipo_cartao STRING,
#   qtd_produtos BIGINT,
#   iteracoes_12m INT,
#   meses_inativo_12m INT,
#   limite_credito DOUBLE,
#   valor_transacoes_12m DOUBLE,
#   qtd_transacoes_12m BIGINT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
# WITH SERDEPROPERTIES ('separatorChar' = ',', 'quoteChar' = '"', 'escapeChar' = '\\')
# STORED AS TEXTFILE
# LOCATION 's3://module-36-ebac-sql/'
# ```
dados_ori.head()
query_1.head()
# Here we have an overview of the data.
# ## Total amount of data
# ```sql
# SELECT count(*) from credito
# ```
query_2.head()
# In total there are 2564 customers in this database.
# ## Customers' education level
# ```sql
# SELECT distinct escolaridade from credito
# ```
query_3.head()
# These are the education levels present in this dataset.
# ## Salary range analysis
query_4.head()
sns.barplot(x=query_4["salario_anual"], y=query_4["_col0"])
# Here we have the salary range distribution; as expected, people in the lowest salary range make up the majority of the group.
# ## Types of card issued
query_5.head()
sns.barplot(x=query_5["tipo_cartao"], y=query_5["_col0"])
# The overwhelming majority of this store's customers hold a blue card.
# ## Customers' gender
query_6.head()
sns.barplot(x=query_6["sexo"], y=query_6["_col0"])
# Most of this store's customers identify as male, outnumbering customers who identify as female by a factor of 1.5.
# ## Minimum and maximum customer age
query_7.head()
# Customer ages range from 26 to 73 years.
# ## Minimum and maximum amount spent
query_8.head()
# ## Relationship between education level and credit limit
query_9.head()
# ## Spending analysis split by gender
query_10.head()
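# ## Possible form of the remaining queries
# The notebook shows SQL only for the table creation and for queries 1 to 3; the aggregations
# behind query_4.csv through query_10.csv are not included. The statements below are a plausible
# reconstruction of two of them, written against the `credito` table defined above; they are
# illustrative, not the author's original queries. Athena names an unaliased aggregate `_col0`,
# which matches the column used in the bar plots.
# ```sql
# -- possible form of the query behind query_4.csv (customers per salary range)
# SELECT salario_anual, count(*)
# FROM credito
# GROUP BY salario_anual
# ORDER BY count(*) DESC
#
# -- possible form of the query behind query_10.csv (total spend by gender)
# SELECT sexo, sum(valor_transacoes_12m)
# FROM credito
# GROUP BY sexo
# ```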
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450418.ipynb
null
null
[{"Id": 129450418, "ScriptId": 38489865, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13768217, "CreationDate": "05/13/2023 23:54:14", "VersionNumber": 1.0, "Title": "Exercicio EBAC SQL Final", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 122.0, "LinesInsertedFromPrevious": 122.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,264
0
1,264
1,264
129450958
<jupyter_start><jupyter_text>Heart Disease Cleveland UCI ### Context The data is already presented in [https://www.kaggle.com/ronitf/heart-disease-uci](https://www.kaggle.com/ronitf/heart-disease-uci) but there are some descriptions and values that are wrong as discussed in [https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877](https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877). So, here is re-processed dataset that was cross-checked with the original data [https://archive.ics.uci.edu/ml/datasets/Heart+Disease](https://archive.ics.uci.edu/ml/datasets/Heart+Disease). ### Content There are 13 attributes 1. age: age in years 2. sex: sex (1 = male; 0 = female) 3. cp: chest pain type -- Value 0: typical angina -- Value 1: atypical angina -- Value 2: non-anginal pain -- Value 3: asymptomatic 4. trestbps: resting blood pressure (in mm Hg on admission to the hospital) 5. chol: serum cholestoral in mg/dl 6. fbs: (fasting blood sugar &gt; 120 mg/dl) (1 = true; 0 = false) 7. restecg: resting electrocardiographic results -- Value 0: normal -- Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of &gt; 0.05 mV) -- Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria 8. thalach: maximum heart rate achieved 9. exang: exercise induced angina (1 = yes; 0 = no) 10. oldpeak = ST depression induced by exercise relative to rest 11. slope: the slope of the peak exercise ST segment -- Value 0: upsloping -- Value 1: flat -- Value 2: downsloping 12. ca: number of major vessels (0-3) colored by flourosopy 13. thal: 0 = normal; 1 = fixed defect; 2 = reversable defect and the label 14. condition: 0 = no disease, 1 = disease Kaggle dataset identifier: heart-disease-cleveland-uci <jupyter_script>import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import missingno ## metrics to evaluate from sklearn.metrics import mean_squared_error from sklearn.metrics import accuracy_score df = pd.read_csv("/kaggle/input/heart-disease-cleveland-uci/heart_cleveland_upload.csv") df.shape df.head() df.info() df.isnull().sum() df.describe().T missingno.matrix(df) df.corr() plt.figure(figsize=(20, 20)) sns.heatmap(df.corr(), annot=True) # ## From this you can see that the affecting feature in the comdition of the heart is: # - age # - sex # - cp # - thalach # - exang # - oldpeak # - slope # - ca # - thal # # ### we might work on these features to get to know there affection better # ## AGE df[["age"]].describe() sns.boxplot(data=df, x="age") # The data looks good no outliers # so let's look to the realtion with the Condition of the heart sns.lineplot(data=df, y="condition", x="age", estimator=len) plt.title("AGE VS HEARTCONDITION") plt.show() # here it's obvious that while getting older the propapility to have a bad condition increases but after a certail age it falls again that might be that people who hav a bad condition unfortunately dowsn't make it # ## Sex df[["sex"]].describe() ct = pd.crosstab(df["sex"], df["condition"], normalize="index") ct.plot(kind="bar", stacked=True) plt.xlabel("Condition") plt.ylabel("Proportion") plt.title("Relationship between sex and Heart Condition") plt.show() ct ct cm = ct.to_numpy() sns.heatmap(cm, annot=True) print("Confusion matrix:\n", cm) # ## والله الرجاله شكلها عندها امراض قلب اكتر # ## CP df["cp"].unique() df["cp"].value_counts() sns.barplot(x=df["cp"], y=df["condition"]) plt.figure(figsize=(20, 10)) sns.boxplot(x=df["cp"], y=df["condition"]) plt.show() 
sns.histplot(data=df, x="condition", hue="cp") # ## thalach df["thalach"].unique() df[["thalach"]].value_counts() df[["thalach"]].describe() sns.boxplot(df["thalach"]) sns.kdeplot(data=df, y="condition", x="thalach") sns.histplot(x=df["thalach"], hue=df["condition"]) # ## exang df["exang"] df["exang"].value_counts() ct = pd.crosstab(df["exang"], df["condition"], normalize="index") ct.plot(kind="bar", stacked=True) plt.xlabel("Condition") plt.ylabel("Proportion") plt.title("Relationship between sex and Heart Condition") plt.show() ct cm = ct.to_numpy() sns.heatmap(cm, annot=True) print("Confusion matrix:\n", cm) # ## oldpeak df["oldpeak"].describe().T sns.boxplot(df["oldpeak"]) sns.histplot(x=df["oldpeak"], hue=df["condition"]) # ## Let's normalise the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # ## Split the data from sklearn.model_selection import train_test_split # X is the feature matrix, y is the target variable X_train, X_test, y_train, y_test = train_test_split( df, df["condition"], test_size=0.2, random_state=42 ) x_tr = scaler.fit_transform(X_train) x_tst = scaler.transform(X_test) # ## Models we will try # ### 1)Logistic Regression from sklearn.linear_model import LogisticRegression clf = LogisticRegression(random_state=30) clf.fit(x_tr, y_train) y_pred = clf.predict(x_tst) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy) intercept = clf.intercept_ # Coefficients for each feature coefficients = clf.coef_ print("intercept : ", intercept) print("coefficients : ", coefficients) # ### 2) Decision tree classifier from sklearn.tree import DecisionTreeClassifier DTC = DecisionTreeClassifier(max_depth=50) DTC.fit(x_tr, y_train) y_pred = DTC.predict(x_tst) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy) # ### 3)RandomForestClassifier from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=42) # train the model rf.fit(x_tr, y_train) # predict on test data y_pred = rf.predict(x_tst) print("Number of trees:", rf.n_estimators) print("Max depth:", rf.max_depth) print("Min samples split:", rf.min_samples_split) print("Min samples leaf:", rf.min_samples_leaf) print("Max features:", rf.max_features) accuracy = accuracy_score(y_test, y_pred) print("Accuracy: ", accuracy) # ### 4)SupportVectorClassifier from sklearn.svm import SVR # train the SVR model svr = SVR(kernel="rbf", C=1, gamma="auto") svr.fit(x_tr, y_train) # evaluate the model on the test set score = svr.score(x_tst, y_test) print("R^2 score: {:.2f}".format(score)) y_pred = svr.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 5) Gradient Boost Classifier from sklearn.ensemble import GradientBoostingClassifier # Define the model model = GradientBoostingClassifier(n_estimators=100, max_depth=3, random_state=42) # Train the model on the training data model.fit(X_train, y_train) # Predict the target variable on the test data y_pred = model.predict(X_test) # Get the accuracy score accuracy = model.score(X_test, y_test) # Print the accuracy score print("Accuracy:", accuracy) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 6) XGBoost import xgboost as xgb clf = xgb.XGBClassifier() clf.fit(x_tr, y_train) y_pred = clf.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) accuracy = model.score(X_test, y_test) print("Accuracy:", accuracy) # ### 7) CatBoost # from catboost import CatBoostClassifier # 
Instantiate the CatBoostClassifier catboost_model = CatBoostClassifier( iterations=1000, learning_rate=0.1, loss_function="Logloss", verbose=True ) # Fit the model on the training data catboost_model.fit(X_train, y_train) # Make predictions on the test data y_pred = catboost_model.predict(X_test) # Evaluate the model using accuracy_score accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # CatBoost is designed to handle categorical data and improve the gradient boosting framework. # CatBoost implements the gradient boosting decision tree algorithm and uses a variant of gradient descent called ordered boosting. It also incorporates several unique features, such as: # Efficient handling of categorical features by performing numerical encoding # A novel method for handling missing values # Built-in support for parallel training on CPU and GPU # CatBoost employs a combination of ordered boosting and random permutations of the input features to reduce overfitting, and thus improve the accuracy and generalization of the model. # One key advantage of CatBoost is that it can automatically handle categorical features without the need for pre-processing, which can be particularly useful in real-world scenarios where the data may contain a mix of categorical and continuous variables. # Overall, CatBoost is a powerful algorithm for handling tabular data with categorical features and has shown to be effective in a variety of machine learning tasks, including classification, regression, and ranking. # ### 8) LightGBM Classifier import lightgbm as lgb lgbm_clf = lgb.LGBMClassifier() # Train the model lgbm_clf.fit(x_tr, y_train) # Predict the classes for test set y_pred = lgbm_clf.predict(x_tst) # Calculate the accuracy score of the model accuracy = accuracy_score(y_test, y_pred) print(f"Accuracy: {accuracy}") mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 9) Naive Bayes Classifier from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train) y_pred = gnb.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) accuracy = accuracy_score(y_test, y_pred) print(f"Accuracy: {accuracy}")
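As a concrete illustration of the native categorical-feature handling described in the CatBoost notes above, the sketch below passes the (assumed) categorical columns straight to `cat_features` instead of encoding them first. It is not the notebook's own pipeline: it builds its own split from `df`, drops the `condition` target from the feature matrix, and assumes the catboost package is importable.

from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split

# Integer-coded columns treated as categorical here by assumption.
cat_cols = ["sex", "cp", "fbs", "restecg", "exang", "slope", "ca", "thal"]

X_cb = df.drop(columns=["condition"])   # predictors only
y_cb = df["condition"]
X_tr, X_te, y_tr, y_te = train_test_split(X_cb, y_cb, test_size=0.2, random_state=42)

cb = CatBoostClassifier(iterations=300, learning_rate=0.1, verbose=False)
cb.fit(X_tr, y_tr, cat_features=cat_cols)        # no one-hot encoding needed
print("CatBoost accuracy:", cb.score(X_te, y_te))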
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/450/129450958.ipynb
heart-disease-cleveland-uci
cherngs
[{"Id": 129450958, "ScriptId": 38490685, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12110194, "CreationDate": "05/14/2023 00:04:18", "VersionNumber": 1.0, "Title": "Model training", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 340.0, "LinesInsertedFromPrevious": 340.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 12}]
[{"Id": 185514297, "KernelVersionId": 129450958, "SourceDatasetVersionId": 1043970}]
[{"Id": 1043970, "DatasetId": 576697, "DatasourceVersionId": 1073183, "CreatorUserId": 4139262, "LicenseName": "Reddit API Terms", "CreationDate": "03/29/2020 12:06:31", "VersionNumber": 1.0, "Title": "Heart Disease Cleveland UCI", "Slug": "heart-disease-cleveland-uci", "Subtitle": "Re-processed heart disease data", "Description": "### Context\nThe data is already presented in [https://www.kaggle.com/ronitf/heart-disease-uci](https://www.kaggle.com/ronitf/heart-disease-uci) but there are some descriptions and values that are wrong as discussed in [https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877](https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877). So, here is re-processed dataset that was cross-checked with the original data [https://archive.ics.uci.edu/ml/datasets/Heart+Disease](https://archive.ics.uci.edu/ml/datasets/Heart+Disease).\n\n### Content\nThere are 13 attributes\n1. age: age in years\n2. sex: sex (1 = male; 0 = female)\n3. cp: chest pain type\n -- Value 0: typical angina\n -- Value 1: atypical angina\n -- Value 2: non-anginal pain\n -- Value 3: asymptomatic\n4. trestbps: resting blood pressure (in mm Hg on admission to the hospital)\n5. chol: serum cholestoral in mg/dl \n6. fbs: (fasting blood sugar &gt; 120 mg/dl) (1 = true; 0 = false)\n7. restecg: resting electrocardiographic results\n -- Value 0: normal\n -- Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of &gt; 0.05 mV)\n -- Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria\n8. thalach: maximum heart rate achieved\n9. exang: exercise induced angina (1 = yes; 0 = no)\n10. oldpeak = ST depression induced by exercise relative to rest\n11. slope: the slope of the peak exercise ST segment\n -- Value 0: upsloping\n -- Value 1: flat\n -- Value 2: downsloping\n12. ca: number of major vessels (0-3) colored by flourosopy\n13. thal: 0 = normal; 1 = fixed defect; 2 = reversable defect \nand the label\n14. condition: 0 = no disease, 1 = disease\n\n### Acknowledgements\nData posted on Kaggle: [https://www.kaggle.com/ronitf/heart-disease-uci](https://www.kaggle.com/ronitf/heart-disease-uci)\nDescription of the data above: [https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877](https://www.kaggle.com/ronitf/heart-disease-uci/discussion/105877)\nOriginal data [https://archive.ics.uci.edu/ml/datasets/Heart+Disease](https://archive.ics.uci.edu/ml/datasets/Heart+Disease)\n\nCreators:\nHungarian Institute of Cardiology. Budapest: Andras Janosi, M.D.\nUniversity Hospital, Zurich, Switzerland: William Steinbr\nCreators:\nHungarian Institute of Cardiology. Budapest: Andras Janosi, M.D.\nUniversity Hospital, Zurich, Switzerland: William Steinbrunn, M.D.\nUniversity Hospital, Basel, Switzerland: Matthias Pfisterer, M.D.\nV.A. Medical Center, Long Beach and Cleveland Clinic Foundation: Robert Detrano, M.D., Ph.D.\nDonor: David W. Aha (aha '@' ics.uci.edu) (714) 856-8779\n\n### Inspiration\nWith the attributes described above, can you predict if a patient has heart disease?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 576697, "CreatorUserId": 4139262, "OwnerUserId": 4139262.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1043970.0, "CurrentDatasourceVersionId": 1073183.0, "ForumId": 590493, "Type": 2, "CreationDate": "03/29/2020 12:06:31", "LastActivityDate": "03/29/2020", "TotalViews": 90419, "TotalDownloads": 14884, "TotalVotes": 131, "TotalKernels": 81}]
[{"Id": 4139262, "UserName": "cherngs", "DisplayName": "Cherngs", "RegisterDate": "12/02/2019", "PerformanceTier": 1}]
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import missingno ## metrics to evaluate from sklearn.metrics import mean_squared_error from sklearn.metrics import accuracy_score df = pd.read_csv("/kaggle/input/heart-disease-cleveland-uci/heart_cleveland_upload.csv") df.shape df.head() df.info() df.isnull().sum() df.describe().T missingno.matrix(df) df.corr() plt.figure(figsize=(20, 20)) sns.heatmap(df.corr(), annot=True) # ## From this you can see that the affecting feature in the comdition of the heart is: # - age # - sex # - cp # - thalach # - exang # - oldpeak # - slope # - ca # - thal # # ### we might work on these features to get to know there affection better # ## AGE df[["age"]].describe() sns.boxplot(data=df, x="age") # The data looks good no outliers # so let's look to the realtion with the Condition of the heart sns.lineplot(data=df, y="condition", x="age", estimator=len) plt.title("AGE VS HEARTCONDITION") plt.show() # here it's obvious that while getting older the propapility to have a bad condition increases but after a certail age it falls again that might be that people who hav a bad condition unfortunately dowsn't make it # ## Sex df[["sex"]].describe() ct = pd.crosstab(df["sex"], df["condition"], normalize="index") ct.plot(kind="bar", stacked=True) plt.xlabel("Condition") plt.ylabel("Proportion") plt.title("Relationship between sex and Heart Condition") plt.show() ct ct cm = ct.to_numpy() sns.heatmap(cm, annot=True) print("Confusion matrix:\n", cm) # ## والله الرجاله شكلها عندها امراض قلب اكتر # ## CP df["cp"].unique() df["cp"].value_counts() sns.barplot(x=df["cp"], y=df["condition"]) plt.figure(figsize=(20, 10)) sns.boxplot(x=df["cp"], y=df["condition"]) plt.show() sns.histplot(data=df, x="condition", hue="cp") # ## thalach df["thalach"].unique() df[["thalach"]].value_counts() df[["thalach"]].describe() sns.boxplot(df["thalach"]) sns.kdeplot(data=df, y="condition", x="thalach") sns.histplot(x=df["thalach"], hue=df["condition"]) # ## exang df["exang"] df["exang"].value_counts() ct = pd.crosstab(df["exang"], df["condition"], normalize="index") ct.plot(kind="bar", stacked=True) plt.xlabel("Condition") plt.ylabel("Proportion") plt.title("Relationship between sex and Heart Condition") plt.show() ct cm = ct.to_numpy() sns.heatmap(cm, annot=True) print("Confusion matrix:\n", cm) # ## oldpeak df["oldpeak"].describe().T sns.boxplot(df["oldpeak"]) sns.histplot(x=df["oldpeak"], hue=df["condition"]) # ## Let's normalise the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # ## Split the data from sklearn.model_selection import train_test_split # X is the feature matrix, y is the target variable X_train, X_test, y_train, y_test = train_test_split( df, df["condition"], test_size=0.2, random_state=42 ) x_tr = scaler.fit_transform(X_train) x_tst = scaler.transform(X_test) # ## Models we will try # ### 1)Logistic Regression from sklearn.linear_model import LogisticRegression clf = LogisticRegression(random_state=30) clf.fit(x_tr, y_train) y_pred = clf.predict(x_tst) accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy) intercept = clf.intercept_ # Coefficients for each feature coefficients = clf.coef_ print("intercept : ", intercept) print("coefficients : ", coefficients) # ### 2) Decision tree classifier from sklearn.tree import DecisionTreeClassifier DTC = DecisionTreeClassifier(max_depth=50) DTC.fit(x_tr, y_train) y_pred = DTC.predict(x_tst) accuracy = accuracy_score(y_test, y_pred) 
print("Accuracy:", accuracy) # ### 3)RandomForestClassifier from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=42) # train the model rf.fit(x_tr, y_train) # predict on test data y_pred = rf.predict(x_tst) print("Number of trees:", rf.n_estimators) print("Max depth:", rf.max_depth) print("Min samples split:", rf.min_samples_split) print("Min samples leaf:", rf.min_samples_leaf) print("Max features:", rf.max_features) accuracy = accuracy_score(y_test, y_pred) print("Accuracy: ", accuracy) # ### 4)SupportVectorClassifier from sklearn.svm import SVR # train the SVR model svr = SVR(kernel="rbf", C=1, gamma="auto") svr.fit(x_tr, y_train) # evaluate the model on the test set score = svr.score(x_tst, y_test) print("R^2 score: {:.2f}".format(score)) y_pred = svr.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 5) Gradient Boost Classifier from sklearn.ensemble import GradientBoostingClassifier # Define the model model = GradientBoostingClassifier(n_estimators=100, max_depth=3, random_state=42) # Train the model on the training data model.fit(X_train, y_train) # Predict the target variable on the test data y_pred = model.predict(X_test) # Get the accuracy score accuracy = model.score(X_test, y_test) # Print the accuracy score print("Accuracy:", accuracy) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 6) XGBoost import xgboost as xgb clf = xgb.XGBClassifier() clf.fit(x_tr, y_train) y_pred = clf.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) accuracy = model.score(X_test, y_test) print("Accuracy:", accuracy) # ### 7) CatBoost # from catboost import CatBoostClassifier # Instantiate the CatBoostClassifier catboost_model = CatBoostClassifier( iterations=1000, learning_rate=0.1, loss_function="Logloss", verbose=True ) # Fit the model on the training data catboost_model.fit(X_train, y_train) # Make predictions on the test data y_pred = catboost_model.predict(X_test) # Evaluate the model using accuracy_score accuracy = accuracy_score(y_test, y_pred) print("Accuracy:", accuracy) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # CatBoost is designed to handle categorical data and improve the gradient boosting framework. # CatBoost implements the gradient boosting decision tree algorithm and uses a variant of gradient descent called ordered boosting. It also incorporates several unique features, such as: # Efficient handling of categorical features by performing numerical encoding # A novel method for handling missing values # Built-in support for parallel training on CPU and GPU # CatBoost employs a combination of ordered boosting and random permutations of the input features to reduce overfitting, and thus improve the accuracy and generalization of the model. # One key advantage of CatBoost is that it can automatically handle categorical features without the need for pre-processing, which can be particularly useful in real-world scenarios where the data may contain a mix of categorical and continuous variables. # Overall, CatBoost is a powerful algorithm for handling tabular data with categorical features and has shown to be effective in a variety of machine learning tasks, including classification, regression, and ranking. 
# ### 8) LightGBM Classifier import lightgbm as lgb lgbm_clf = lgb.LGBMClassifier() # Train the model lgbm_clf.fit(x_tr, y_train) # Predict the classes for test set y_pred = lgbm_clf.predict(x_tst) # Calculate the accuracy score of the model accuracy = accuracy_score(y_test, y_pred) print(f"Accuracy: {accuracy}") mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) # ### 9) Naive Bayes Classifier from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train) y_pred = gnb.predict(x_tst) mse = mean_squared_error(y_test, y_pred) print("Mean Squared Error:", mse) accuracy = accuracy_score(y_test, y_pred) print(f"Accuracy: {accuracy}")
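For the "normalise the data" and "split the data" steps above, one compact alternative is to keep the scaler and the classifier in a single scikit-learn Pipeline and cross-validate it. The sketch below assumes `df` is in scope and uses logistic regression purely as an example model; the feature matrix here excludes the `condition` column.

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X = df.drop(columns=["condition"])
y = df["condition"]

# Scaling is fitted inside each CV fold, so no information leaks from the held-out fold.
pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression(max_iter=1000))])
scores = cross_val_score(pipe, X, y, cv=5, scoring="accuracy")
print("5-fold accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))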
false
1
2,449
12
3,056
2,449
129427361
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import json # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session def mapIdToCat(dataset): Id_Cat = {} for entity in dataset["data"]: paras = entity["paragraphs"] for in_entity in paras: question_objects = in_entity["qas"] context = in_entity["context"] for in_in_entity in question_objects: question = in_in_entity["question"] answers = in_in_entity["answers"] question_type = None if question.lower().startswith("what"): question_type = "What" elif question.lower().startswith("who"): question_type = "Who" elif question.lower().startswith("where"): question_type = "Where" elif question.lower().startswith("when"): question_type = "When" elif question.lower().startswith("why"): question_type = "Why" elif question.lower().startswith("how"): question_type = "How" elif question.lower().startswith("which"): question_type = "Which" elif question.lower().startswith("is"): question_type = "Yes/No" elif question.lower().startswith("do"): question_type = "Yes/No" else: question_type = "Other" Id_Cat[in_in_entity["id"]] = question_type return Id_Cat f = open("/kaggle/input/squad-train/train-v2.0.json") data = json.load(f) Res_Id_Cat = mapIdToCat(data) import json def calAvgForEachCat(modelOutputPath): with open(modelOutputPath, "r") as file: data = file.read() LoadedIdF = json.loads(data) catSum = {} catCount = {} cats = question_types = [ "What", "Who", "Where", "When", "Why", "How", "Which", "Yes/No", "Other", ] for cat in cats: catSum[cat] = 0 catCount[cat] = 0 for id in LoadedIdF: if id in Res_Id_Cat: curCat = Res_Id_Cat[id] curF = LoadedIdF[id][1] catSum[curCat] += curF catCount[curCat] += 1 for cat in cats: if catCount[cat] != 0: catSum[cat] = catSum[cat] / catCount[cat] return catSum import matplotlib.pyplot as plt # Example dictionary data = calAvgForEachCat("/kaggle/input/id-f1score/prediction_trainDataset_BIDAF.txt") # Extract the x-values and corresponding heights x = list(data.keys()) heights = list(data.values()) # Plotting plt.bar(x, heights) plt.xlabel("Categories") plt.ylabel("Avg F1 Score") plt.title("BIDAF") plt.show() import matplotlib.pyplot as plt # Example dictionary data = calAvgForEachCat("/kaggle/input/id-f1score/prediction_trainDataset_BiLSTM.txt") # Extract the x-values and corresponding heights x = list(data.keys()) heights = list(data.values()) # Plotting plt.bar(x, heights) plt.xlabel("Categories") plt.ylabel("Avg F1 score") plt.title("BILSTM") plt.show() import matplotlib.pyplot as plt # Example dictionary data = calAvgForEachCat( "/kaggle/input/id-f1score/prediction_trainDataset_DistilBERT.txt" ) # Extract the x-values and corresponding heights x = list(data.keys()) heights = list(data.values()) # Plotting plt.bar(x, heights) plt.xlabel("Categories") plt.ylabel("Avg F1 score") plt.title("DISTILLED BERT") plt.show() import matplotlib.pyplot as plt # Example dictionary data = calAvgForEachCat("/kaggle/input/bertupdated/outputval (1).txt") # Extract the 
x-values and corresponding heights x = list(data.keys()) heights = list(data.values()) # Plotting plt.bar(x, heights) plt.xlabel("Categories") plt.ylabel("Avg F1 score") plt.title("BERT") plt.show()
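Since `calAvgForEachCat` returns a plain category-to-average-F1 dict, the four separate bar charts above can also be collapsed into one grouped comparison. The sketch below is only an optional presentation variant; it reuses the same function and file paths already used above.

import pandas as pd
import matplotlib.pyplot as plt

model_files = {
    "BIDAF": "/kaggle/input/id-f1score/prediction_trainDataset_BIDAF.txt",
    "BiLSTM": "/kaggle/input/id-f1score/prediction_trainDataset_BiLSTM.txt",
    "DistilBERT": "/kaggle/input/id-f1score/prediction_trainDataset_DistilBERT.txt",
    "BERT": "/kaggle/input/bertupdated/outputval (1).txt",
}

# One column per model, one row per question category.
scores = pd.DataFrame({name: calAvgForEachCat(path) for name, path in model_files.items()})
scores.plot(kind="bar", figsize=(12, 6))
plt.xlabel("Question category")
plt.ylabel("Avg F1 score")
plt.title("Average F1 by question type, per model")
plt.show()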
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/427/129427361.ipynb
null
null
[{"Id": 129427361, "ScriptId": 38436581, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14938692, "CreationDate": "05/13/2023 17:41:19", "VersionNumber": 1.0, "Title": "notebook9f56f2eb64", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 163.0, "LinesInsertedFromPrevious": 163.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,228
0
1,228
1,228
129427373
<jupyter_start><jupyter_text>IMDB movies dataset The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies. Kaggle dataset identifier: imdb-movies-dataset <jupyter_script># # Import Libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Exploratory Data Analysis df = pd.read_csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv") print("Number of movies is: ", df.shape[0]) df.head() df.info() df.dropna(inplace=True) df.columns df.country.unique() df.describe().T.style.format("{:,.2f}") corr = df[["score", "revenue", "budget_x"]].corr() sns.heatmap(corr, annot=True) top_10_revenues = df.sort_values("revenue", ascending=True)[-10:] top_10_revenues # Create a bar plot plt.bar(top_10_revenues["names"], top_10_revenues["revenue"]) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Movie Title") plt.ylabel("Revenue") # Show the plot plt.show() revenue_by_country = df.loc[:, ["country", "revenue"]] revenue_by_country = revenue_by_country.groupby("country").sum() top_5_countries = revenue_by_country.nlargest(5, "revenue") # Create a bar plot plt.bar(top_5_countries.index, top_5_countries["revenue"] / 1000000000) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Country") plt.ylabel("Revenue (in billions)") # Show the plot plt.show() df["date_x"] = pd.to_datetime(df["date_x"]) df["year"] = df["date_x"].dt.year mean_by_year = df.groupby("year")[["revenue", "budget_x"]].mean() plt.figure(figsize=(30, 15)) plt.bar(mean_by_year.index, mean_by_year.revenue / 1000000, width=1) plt.xlabel("Year", fontsize=18) plt.ylabel("Revenue (in millions)", fontsize=18) plt.title("Revenue over Years (1903-2023)", fontsize=18) revenue_by_lang = df.loc[:, ["orig_lang", "revenue"]] revenue_by_lang = revenue_by_lang.groupby("orig_lang").sum() top_5_langs = revenue_by_lang.nlargest(5, "revenue") # Create a bar plot plt.bar(top_5_langs.index, top_5_langs["revenue"] / 1000000000) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Country") plt.ylabel("Revenue (in billions)") # Show the plot plt.show()
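Building on the revenue and budget_x columns already analysed above, a derived profit figure is a natural next step. This is an optional sketch assuming the same `df` (with its `names` column) is still loaded.

# Profit per movie and the ten most profitable titles.
df["profit"] = df["revenue"] - df["budget_x"]
top_profit = df.nlargest(10, "profit")[["names", "profit"]]

plt.barh(top_profit["names"], top_profit["profit"] / 1_000_000_000)
plt.xlabel("Profit (in billions)")
plt.ylabel("Movie Title")
plt.title("Top 10 movies by profit")
plt.gca().invert_yaxis()   # largest profit at the top
plt.show()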
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/427/129427373.ipynb
imdb-movies-dataset
ashpalsingh1525
[{"Id": 129427373, "ScriptId": 38481937, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10444652, "CreationDate": "05/13/2023 17:41:29", "VersionNumber": 1.0, "Title": "EDA on IMDB Movies", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 100.0, "LinesInsertedFromPrevious": 100.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
[{"Id": 185468030, "KernelVersionId": 129427373, "SourceDatasetVersionId": 5552662}]
[{"Id": 5552662, "DatasetId": 3198793, "DatasourceVersionId": 5627422, "CreatorUserId": 13490345, "LicenseName": "Community Data License Agreement - Permissive - Version 1.0", "CreationDate": "04/28/2023 23:18:15", "VersionNumber": 1.0, "Title": "IMDB movies dataset", "Slug": "imdb-movies-dataset", "Subtitle": "Explore 10000+ movies worldwide with the IMDB Movies dataset", "Description": "The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3198793, "CreatorUserId": 13490345, "OwnerUserId": 13490345.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5552662.0, "CurrentDatasourceVersionId": 5627422.0, "ForumId": 3263430, "Type": 2, "CreationDate": "04/28/2023 23:18:15", "LastActivityDate": "04/28/2023", "TotalViews": 19297, "TotalDownloads": 3999, "TotalVotes": 79, "TotalKernels": 10}]
[{"Id": 13490345, "UserName": "ashpalsingh1525", "DisplayName": "Ashpal Singh1525", "RegisterDate": "01/31/2023", "PerformanceTier": 2}]
# # Import Libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Exploratory Data Analysis df = pd.read_csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv") print("Number of movies is: ", df.shape[0]) df.head() df.info() df.dropna(inplace=True) df.columns df.country.unique() df.describe().T.style.format("{:,.2f}") corr = df[["score", "revenue", "budget_x"]].corr() sns.heatmap(corr, annot=True) top_10_revenues = df.sort_values("revenue", ascending=True)[-10:] top_10_revenues # Create a bar plot plt.bar(top_10_revenues["names"], top_10_revenues["revenue"]) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Movie Title") plt.ylabel("Revenue") # Show the plot plt.show() revenue_by_country = df.loc[:, ["country", "revenue"]] revenue_by_country = revenue_by_country.groupby("country").sum() top_5_countries = revenue_by_country.nlargest(5, "revenue") # Create a bar plot plt.bar(top_5_countries.index, top_5_countries["revenue"] / 1000000000) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Country") plt.ylabel("Revenue (in billions)") # Show the plot plt.show() df["date_x"] = pd.to_datetime(df["date_x"]) df["year"] = df["date_x"].dt.year mean_by_year = df.groupby("year")[["revenue", "budget_x"]].mean() plt.figure(figsize=(30, 15)) plt.bar(mean_by_year.index, mean_by_year.revenue / 1000000, width=1) plt.xlabel("Year", fontsize=18) plt.ylabel("Revenue (in millions)", fontsize=18) plt.title("Revenue over Years (1903-2023)", fontsize=18) revenue_by_lang = df.loc[:, ["orig_lang", "revenue"]] revenue_by_lang = revenue_by_lang.groupby("orig_lang").sum() top_5_langs = revenue_by_lang.nlargest(5, "revenue") # Create a bar plot plt.bar(top_5_langs.index, top_5_langs["revenue"] / 1000000000) # Set the x-axis labels plt.xticks(rotation=90) # Label the axes plt.xlabel("Country") plt.ylabel("Revenue (in billions)") # Show the plot plt.show()
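The correlation heatmap earlier reduces the budget/revenue relationship to a single coefficient; as an optional follow-up (same `df` assumed), a regression scatter shows the spread behind that number.

# Scatter of budget against revenue with a fitted regression line.
sns.regplot(data=df, x="budget_x", y="revenue", scatter_kws={"alpha": 0.3}, line_kws={"color": "red"})
plt.xlabel("Budget")
plt.ylabel("Revenue")
plt.title("Budget vs revenue")
plt.show()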
false
1
918
7
1,033
918
129427879
<jupyter_start><jupyter_text>Video Game Sales This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1]. Fields include * Rank - Ranking of overall sales * Name - The games name * Platform - Platform of the games release (i.e. PC,PS4, etc.) * Year - Year of the game's release * Genre - Genre of the game * Publisher - Publisher of the game * NA_Sales - Sales in North America (in millions) * EU_Sales - Sales in Europe (in millions) * JP_Sales - Sales in Japan (in millions) * Other_Sales - Sales in the rest of the world (in millions) * Global_Sales - Total worldwide sales. The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape. It is based on BeautifulSoup using Python. There are 16,598 records. 2 records were dropped due to incomplete information. [1]: http://www.vgchartz.com/ Kaggle dataset identifier: videogamesales <jupyter_code>import pandas as pd df = pd.read_csv('videogamesales/vgsales.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <jupyter_text>Examples: { "Rank": 1, "Name": "Wii Sports", "Platform": "Wii", "Year": 2006, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 41.49, "EU_Sales": 29.02, "JP_Sales": 3.77, "Other_Sales": 8.46, "Global_Sales": 82.74 } { "Rank": 2, "Name": "Super Mario Bros.", "Platform": "NES", "Year": 1985, "Genre": "Platform", "Publisher": "Nintendo", "NA_Sales": 29.08, "EU_Sales": 3.58, "JP_Sales": 6.8100000000000005, "Other_Sales": 0.77, "Global_Sales": 40.24 } { "Rank": 3, "Name": "Mario Kart Wii", "Platform": "Wii", "Year": 2008, "Genre": "Racing", "Publisher": "Nintendo", "NA_Sales": 15.85, "EU_Sales": 12.88, "JP_Sales": 3.79, "Other_Sales": 3.31, "Global_Sales": 35.82 } { "Rank": 4, "Name": "Wii Sports Resort", "Platform": "Wii", "Year": 2009, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 15.75, "EU_Sales": 11.01, "JP_Sales": 3.2800000000000002, "Other_Sales": 2.96, "Global_Sales": 33.0 } <jupyter_script># # Lab: Data Analysis with Pandas (Video Games # # Data Set: Video Game Sales # # By: Mohamad Shareef Naser # 10/5/2023 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Reading CSV file: df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df over10000 = df[df["Global_Sales"] > 0.01] over10000 # # 1. 
Which company is the most common video game publisher? over10000["Publisher"].value_counts().index[0] # # 2. What’s the most common platform? over10000["Platform"].value_counts().index[0] # # 3. What about the most common genre? over10000["Genre"].value_counts().index[0] # # 4. What are the top 20 highest grossing games? over10000[["Name", "Global_Sales"]].sort_values("Global_Sales", ascending=False)[0:20] # # 5. For North American video game sales, what’s the median? median_sales = over10000["NA_Sales"].median() median_sales # ### - Provide a secondary output showing ten games surrounding the median sales output. # ### - Assume that games with same median value are sorted in descending order. # surrounding_df = over10000[ ((over10000["NA_Sales"] >= median_sales) & (over10000["NA_Sales"] <= median_sales)) ].head(10) surrounding_df = surrounding_df.sort_values("NA_Sales", ascending=False) surrounding_df # # 6. For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? # na_sales = over10000["NA_Sales"] mean = na_sales.mean() std_dev = na_sales.std() top_selling_sales = na_sales.max() num_std_dev = (top_selling_sales - mean) / std_dev num_std_dev # # 7. The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? # wii_sales = df[df["Platform"] == "Wii"]["Global_Sales"] wii_avg_sales = wii_sales.mean() other_platforms_avg_sales = df[df["Platform"] != "Wii"]["Global_Sales"].mean() if wii_avg_sales > other_platforms_avg_sales: comparison = "higher" elif wii_avg_sales < other_platforms_avg_sales: comparison = "lower" else: comparison = "equal" print( f"The average number of sales for the Nintendo Wii is {comparison} than all other platforms." ) # # 8. Come up with 3 more questions that can be answered with this data set. # ## Q1:What is the total number of games in the dataset? # total_games = len(over10000) print("Total number of games:", total_games) # ## Q2:What are the top 5 best-selling games globally? # top_selling_games = over10000.nlargest(5, "Global_Sales")[["Name", "Global_Sales"]] print("Top 5 best-selling games globally:") print(top_selling_games) # ## Q3:How many games were released in each platform? # games_per_platform = over10000["Platform"].value_counts() print("Number of games released in each platform:") print(games_per_platform)
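The question-5 cell above keeps only the rows whose NA_Sales equals the median; another way to read "ten games surrounding the median" is positional, i.e. the five titles on each side of the median row once the data is sorted. A sketch of that reading, using the over10000 frame defined above:

sorted_na = over10000.sort_values("NA_Sales", ascending=False).reset_index(drop=True)
mid = len(sorted_na) // 2                                  # position of the median row
surrounding_10 = sorted_na.iloc[mid - 5 : mid + 5][["Name", "NA_Sales"]]
print(surrounding_10)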
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/427/129427879.ipynb
videogamesales
gregorut
[{"Id": 129427879, "ScriptId": 38339649, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13648771, "CreationDate": "05/13/2023 17:47:32", "VersionNumber": 4.0, "Title": "vg-stats", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 124.0, "LinesInsertedFromPrevious": 44.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 80.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185468912, "KernelVersionId": 129427879, "SourceDatasetVersionId": 618}]
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
# # Lab: Data Analysis with Pandas (Video Games # # Data Set: Video Game Sales # # By: Mohamad Shareef Naser # 10/5/2023 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Reading CSV file: df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df over10000 = df[df["Global_Sales"] > 0.01] over10000 # # 1. Which company is the most common video game publisher? over10000["Publisher"].value_counts().index[0] # # 2. What’s the most common platform? over10000["Platform"].value_counts().index[0] # # 3. What about the most common genre? over10000["Genre"].value_counts().index[0] # # 4. What are the top 20 highest grossing games? over10000[["Name", "Global_Sales"]].sort_values("Global_Sales", ascending=False)[0:20] # # 5. For North American video game sales, what’s the median? median_sales = over10000["NA_Sales"].median() median_sales # ### - Provide a secondary output showing ten games surrounding the median sales output. # ### - Assume that games with same median value are sorted in descending order. # surrounding_df = over10000[ ((over10000["NA_Sales"] >= median_sales) & (over10000["NA_Sales"] <= median_sales)) ].head(10) surrounding_df = surrounding_df.sort_values("NA_Sales", ascending=False) surrounding_df # # 6. For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? # na_sales = over10000["NA_Sales"] mean = na_sales.mean() std_dev = na_sales.std() top_selling_sales = na_sales.max() num_std_dev = (top_selling_sales - mean) / std_dev num_std_dev # # 7. The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? # wii_sales = df[df["Platform"] == "Wii"]["Global_Sales"] wii_avg_sales = wii_sales.mean() other_platforms_avg_sales = df[df["Platform"] != "Wii"]["Global_Sales"].mean() if wii_avg_sales > other_platforms_avg_sales: comparison = "higher" elif wii_avg_sales < other_platforms_avg_sales: comparison = "lower" else: comparison = "equal" print( f"The average number of sales for the Nintendo Wii is {comparison} than all other platforms." ) # # 8. Come up with 3 more questions that can be answered with this data set. # ## Q1:What is the total number of games in the dataset? # total_games = len(over10000) print("Total number of games:", total_games) # ## Q2:What are the top 5 best-selling games globally? # top_selling_games = over10000.nlargest(5, "Global_Sales")[["Name", "Global_Sales"]] print("Top 5 best-selling games globally:") print(top_selling_games) # ## Q3:How many games were released in each platform? # games_per_platform = over10000["Platform"].value_counts() print("Number of games released in each platform:") print(games_per_platform)
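In the same spirit as the extra questions in section 8, here is one more example question as a sketch over the same over10000 frame: which publisher has the highest total global sales within each genre?

top_pub_per_genre = (
    over10000.groupby(["Genre", "Publisher"])["Global_Sales"]
    .sum()
    .groupby(level="Genre")
    .idxmax()                          # (Genre, Publisher) index of the per-genre maximum
    .apply(lambda pair: pair[1])       # keep only the publisher name
)
print(top_pub_per_genre)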
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
true
1
<start_data_description><data_path>videogamesales/vgsales.csv: <column_names> ['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales'] <column_types> {'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'} <dataframe_Summary> {'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}} <dataframe_info> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <some_examples> {'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}} <end_description>
1,112
0
2,225
1,112
129907751
import os import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt train = pd.read_csv( os.path.join("/kaggle/input/titanicfinal1/DSB_Day1_Titanic_train.csv") ) test = pd.read_csv(os.path.join("/kaggle/input/titanicfinal1/test (1).csv")) train.info() train.head() train["Survived"].value_counts(normalize=True) sns.countplot(train["Survived"]) train["Name"].head() train["Name_Title"] = ( train["Name"].apply(lambda x: x.split(",")[1]).apply(lambda x: x.split()[0]) ) train["Name_Title"].value_counts() train["Survived"].groupby(train["Name_Title"]).mean() train["Name_Len"] = train["Name"].apply(lambda x: len(x)) train["Survived"].groupby(pd.qcut(train["Name_Len"], 5)).mean() pd.qcut(train["Name_Len"], 5).value_counts() train["Sex"].value_counts(normalize=True) train["Survived"].groupby(train["Sex"]).mean() train["Survived"].groupby(train["Age"].isnull()).mean() train["Survived"].groupby(pd.qcut(train["Age"], 5)).mean() pd.qcut(train["Age"], 5).value_counts() train["Ticket"].head(n=10) train["Ticket_Len"] = train["Ticket"].apply(lambda x: len(x)) train["Ticket_Len"].value_counts() train["Ticket_Lett"] = train["Ticket"].apply(lambda x: str(x)[0]) train["Ticket_Lett"].value_counts() train.groupby(["Ticket_Lett"])["Survived"].mean() train["Embarked"].value_counts() train["Embarked"].value_counts(normalize=True) train["Survived"].groupby(train["Embarked"]).mean() train["Survived"].groupby(train["Pclass"]).mean() sns.countplot(x=train["Pclass"], hue=train["Survived"]) train["Embarked"].value_counts() train["Embarked"].value_counts(normalize=True) train["Survived"].groupby(train["Embarked"]).mean() sns.countplot(x=train["Embarked"], hue=train["Pclass"])
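The script above derives `Name_Title` by splitting each `Name` on the comma and taking the first token of the remainder, then groups survival rates by that title. The sketch below makes the two-step split easier to see; the three passenger rows are invented for illustration and are not taken from the DSB_Day1_Titanic_train.csv file.

```python
import pandas as pd

# Three made-up passenger rows, only to illustrate the transformation.
toy = pd.DataFrame({
    "Name": [
        "Braund, Mr. Owen Harris",
        "Cumings, Mrs. John Bradley (Florence Briggs Thayer)",
        "Heikkinen, Miss. Laina",
    ],
    "Survived": [0, 1, 1],
})

# Same chained apply as the notebook: text after the comma, then its first word.
toy["Name_Title"] = (
    toy["Name"].apply(lambda x: x.split(",")[1]).apply(lambda x: x.split()[0])
)
print(toy["Name_Title"].tolist())  # ['Mr.', 'Mrs.', 'Miss.']

# Survival rate per extracted title, mirroring train["Survived"].groupby(...).mean().
print(toy["Survived"].groupby(toy["Name_Title"]).mean())
```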
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/907/129907751.ipynb
null
null
[{"Id": 129907751, "ScriptId": 38640948, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13993613, "CreationDate": "05/17/2023 10:58:29", "VersionNumber": 1.0, "Title": "titanic", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 65.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import os import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt train = pd.read_csv( os.path.join("/kaggle/input/titanicfinal1/DSB_Day1_Titanic_train.csv") ) test = pd.read_csv(os.path.join("/kaggle/input/titanicfinal1/test (1).csv")) train.info() train.head() train["Survived"].value_counts(normalize=True) sns.countplot(train["Survived"]) train["Name"].head() train["Name_Title"] = ( train["Name"].apply(lambda x: x.split(",")[1]).apply(lambda x: x.split()[0]) ) train["Name_Title"].value_counts() train["Survived"].groupby(train["Name_Title"]).mean() train["Name_Len"] = train["Name"].apply(lambda x: len(x)) train["Survived"].groupby(pd.qcut(train["Name_Len"], 5)).mean() pd.qcut(train["Name_Len"], 5).value_counts() train["Sex"].value_counts(normalize=True) train["Survived"].groupby(train["Sex"]).mean() train["Survived"].groupby(train["Age"].isnull()).mean() train["Survived"].groupby(pd.qcut(train["Age"], 5)).mean() pd.qcut(train["Age"], 5).value_counts() train["Ticket"].head(n=10) train["Ticket_Len"] = train["Ticket"].apply(lambda x: len(x)) train["Ticket_Len"].value_counts() train["Ticket_Lett"] = train["Ticket"].apply(lambda x: str(x)[0]) train["Ticket_Lett"].value_counts() train.groupby(["Ticket_Lett"])["Survived"].mean() train["Embarked"].value_counts() train["Embarked"].value_counts(normalize=True) train["Survived"].groupby(train["Embarked"]).mean() train["Survived"].groupby(train["Pclass"]).mean() sns.countplot(x=train["Pclass"], hue=train["Survived"]) train["Embarked"].value_counts() train["Embarked"].value_counts(normalize=True) train["Survived"].groupby(train["Embarked"]).mean() sns.countplot(x=train["Embarked"], hue=train["Pclass"])
false
0
616
0
616
616
129907949
from IPython.display import Image, display display(Image(filename="32.png")) display(Image(filename="32_predict.png"))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/907/129907949.ipynb
null
null
[{"Id": 129907949, "ScriptId": 38431293, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14981455, "CreationDate": "05/17/2023 11:00:25", "VersionNumber": 2.0, "Title": "notebook5708a89a28", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 38.0, "LinesInsertedFromPrevious": 30.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 8.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
from IPython.display import Image, display display(Image(filename="32.png")) display(Image(filename="32_predict.png"))
false
0
36
0
36
36
129893775
# ## Import important libraries and read data import pandas as pd import plotly_express as px import plotly.graph_objects as go df = pd.read_parquet("/kaggle/input/api-management-data/api_management_clean.parquet") # ## Overview df.shape df.head() df.tail() df.describe() df.dtypes # ## Cleansing # ### Check Nulls df.isnull().sum() # ### Object to Number df["code"] = df["code"].apply(lambda x: int(x)) df["Price"] = df["Price"].astype(int) df["gwStatus"] = df["gwStatus"].astype(bool) df["response_status"] = df["response_status"].astype(int) df["status"] = df["status"].astype(bool) df["gwCode"] = df["gwCode"].astype(int) df.dtypes # ### Handle date feature # Convert object type to datetime type df["received_at"] = pd.to_datetime(df["received_at"], utc=True) # Create some new columns from received_at column df["received_at_short"] = df["received_at"].dt.date df["received_at_year"] = df["received_at"].dt.year df["received_at_month"] = df["received_at"].dt.month df["received_at_day"] = df["received_at"].dt.day df.tail() # These columns can be replaced with boolean values: # **log_level, method, planName, log_type, paymentType, call_type, api_type** # or, in the **planName** case, they can be encoded with ordinal numbers like 1 2 3. # **BUT** we keep them as they are, because at the moment we don't have plans for any ML models. # ## EDA # ### Count Percentage Plot By Column available_colors = [ "aliceblue", "antiquewhite", "aqua", "aquamarine", "azure", "beige", "bisque", "black", "blanchedalmond", "blue", "blueviolet", "brown", "burlywood", "cadetblue", "chartreuse", "chocolate", "coral", "cornflowerblue", "cornsilk", "crimson", "cyan", "darkblue", "darkcyan", "darkgoldenrod", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta", "darkolivegreen", "darkorange", "darkorchid", "darkred", "darksalmon", "darkseagreen", "darkslateblue", "darkslategray", "darkslategrey", "darkturquoise", "darkviolet", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "goldenrod", "gray", "grey", "green", "greenyellow", "honeydew", "hotpink", "indianred", "indigo", "ivory", "khaki", "lavender", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcoral", "lightcyan", "lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightpink", "lightsalmon", "lightseagreen", "lightskyblue", "lightslategray", "lightslategrey", "lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "magenta", "maroon", "mediumaquamarine", "mediumblue", "mediumorchid", "mediumpurple", "mediumseagreen", "mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin", "navajowhite", "navy", "oldlace", "olive", "olivedrab", "orange", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru", "pink", "plum", "powderblue", "purple", "red", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver", "skyblue", "slateblue", "slategray", "slategrey", "snow", "springgreen", "steelblue", "tan", "teal", "thistle", "tomato", "turquoise", "violet", "wheat", "white", "whitesmoke", "yellow", "yellowgreen", ] def count_percentage_plot_by_column(column_name, color_name="aqua"): global available_colors global df if column_name not in df.columns.tolist(): raise Exception(f"Column with name '{column_name}' doesn't exist.") if color_name not in available_colors: raise Exception(f"Color with name '{color_name}' doesn't exist.") count_percentage = [ round(i, 2) for i in df[column_name].value_counts().values / df.shape[0] * 100 ] uniques_values = df[column_name].value_counts().index result = pd.DataFrame( {column_name: uniques_values, "count_percentage": count_percentage} ) fig = px.bar(result, x=column_name, y="count_percentage") fig.update_layout(autosize=False, width=400, height=400) fig.update_traces(marker=dict(color=color_name)) fig.show() count_percentage_plot_by_column("log_level", "aqua") count_percentage_plot_by_column("code", "pink") count_percentage_plot_by_column("method", "olive") count_percentage_plot_by_column("Price", "midnightblue") count_percentage_plot_by_column("gwStatus", "lightseagreen") count_percentage_plot_by_column("planName", "purple") count_percentage_plot_by_column("received_at_year", "peru") count_percentage_plot_by_column("log_type", "orchid") count_percentage_plot_by_column("response_status", "gold") count_percentage_plot_by_column("status", "darkorange") count_percentage_plot_by_column("paymentType", "lime") count_percentage_plot_by_column("gwCode", "deeppink") count_percentage_plot_by_column("call_type", "turquoise") count_percentage_plot_by_column("api_type", "plum") # ### Loss Money For ERROR By Provider Name def loss_money_by_provider(pname): if pname not in df["providerName"].unique(): raise Exception( f"This provider name {pname} doesn't exist in the provider names list." ) lost_money = df[(df["log_level"] == "ERROR") & (df["providerName"] == pname)][ "Price" ].sum() fig = go.Figure() fig.add_trace( go.Indicator( mode="number+delta", value=lost_money, domain={"row": 0, "column": 1} ) ) fig.update_layout( autosize=False, width=400, height=400, title={ "text": f"Loss money for {pname}", "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, ) fig.show() loss_money_by_provider("provider_186") # ### Notify providers who have a Gold plan but *don't serve well*. # ERROR AND GOLD PLAN error_and_goldplan = round( df[(df["log_level"] == "ERROR") & (df["planName"] == "Gold")].shape[0], 2 ) # ALL GOLD PLAN all_goldplan = df[(df["planName"] == "Gold")].shape[0] # We can see the percentage of failed transactions in the Gold plan. It is almost 8% round(error_and_goldplan / all_goldplan * 100, 2) # Because of the 92% success rate in the Gold plan, we can claim that the problem is on the provider side and **not** on the API management side. # Now let's check whether we have a badly performing provider with a huge amount of failed transactions on a Gold plan. df[(df["log_level"] == "ERROR") & (df["planName"] == "Gold")][ "providerName" ].value_counts() # The worst one is **provider_186**, followed by provider_410, provider_279 and provider_376. We should notify them to solve the problem. # ## Outliers # ### providerResponse_Time df["providerResponse_Time"].plot(kind="box", figsize=(10, 10)) # In a real case, a **providerResponse_Time** of more than **10 seconds** (an intuition-based threshold) would be considered an **outlier**. We should do diagnostic analytics for the outliers and notify providers. # But that's not done here because we have mock data. df[df["providerResponse_Time"] > 10].shape[0] # ### response_Time df["response_Time"].plot(kind="box", figsize=(10, 10)) # In a real case, a **response_Time** of more than **20 seconds** (an intuition-based threshold) would be considered an **outlier**. We should do diagnostic analytics for the outliers and notify providers. # But that's not done here because we have mock data. # ### Relations df.columns df.corr()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/893/129893775.ipynb
null
null
[{"Id": 129893775, "ScriptId": 38632783, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7867890, "CreationDate": "05/17/2023 08:59:43", "VersionNumber": 1.0, "Title": "api-management", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 205.0, "LinesInsertedFromPrevious": 205.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## Import important libraries and read data import pandas as pd import plotly_express as px import plotly.graph_objects as go df = pd.read_parquet("/kaggle/input/api-management-data/api_management_clean.parquet") # ## Overview df.shape df.head() df.tail() df.describe() df.dtypes # ## Cleansing # ### Check Nulls df.isnull().sum() # ### Object to Number df["code"] = df["code"].apply(lambda x: int(x)) df["Price"] = df["Price"].astype(int) df["gwStatus"] = df["gwStatus"].astype(bool) df["response_status"] = df["response_status"].astype(int) df["status"] = df["status"].astype(bool) df["gwCode"] = df["gwCode"].astype(int) df.dtypes # ### Handle date feature # Convert object type to datetime type df["received_at"] = pd.to_datetime(df["received_at"], utc=True) # Create some new columns from received_at column df["received_at_short"] = df["received_at"].dt.date df["received_at_year"] = df["received_at"].dt.year df["received_at_month"] = df["received_at"].dt.month df["received_at_day"] = df["received_at"].dt.day df.tail() # These columns can be replaced with boolean values: # **log_level, method, planName, log_type, paymentType, call_type, api_type** # or, in the **planName** case, they can be encoded with ordinal numbers like 1 2 3. # **BUT** we keep them as they are, because at the moment we don't have plans for any ML models. # ## EDA # ### Count Percentage Plot By Column available_colors = [ "aliceblue", "antiquewhite", "aqua", "aquamarine", "azure", "beige", "bisque", "black", "blanchedalmond", "blue", "blueviolet", "brown", "burlywood", "cadetblue", "chartreuse", "chocolate", "coral", "cornflowerblue", "cornsilk", "crimson", "cyan", "darkblue", "darkcyan", "darkgoldenrod", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta", "darkolivegreen", "darkorange", "darkorchid", "darkred", "darksalmon", "darkseagreen", "darkslateblue", "darkslategray", "darkslategrey", "darkturquoise", "darkviolet", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "goldenrod", "gray", "grey", "green", "greenyellow", "honeydew", "hotpink", "indianred", "indigo", "ivory", "khaki", "lavender", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcoral", "lightcyan", "lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightpink", "lightsalmon", "lightseagreen", "lightskyblue", "lightslategray", "lightslategrey", "lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "magenta", "maroon", "mediumaquamarine", "mediumblue", "mediumorchid", "mediumpurple", "mediumseagreen", "mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin", "navajowhite", "navy", "oldlace", "olive", "olivedrab", "orange", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru", "pink", "plum", "powderblue", "purple", "red", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver", "skyblue", "slateblue", "slategray", "slategrey", "snow", "springgreen", "steelblue", "tan", "teal", "thistle", "tomato", "turquoise", "violet", "wheat", "white", "whitesmoke", "yellow", "yellowgreen", ] def count_percentage_plot_by_column(column_name, color_name="aqua"): global available_colors global df if column_name not in df.columns.tolist(): raise Exception(f"Column with name '{column_name}' doesn't exist.") if color_name not in available_colors: raise Exception(f"Color with name '{color_name}' doesn't exist.") count_percentage = [ round(i, 2) for i in df[column_name].value_counts().values / df.shape[0] * 100 ] uniques_values = df[column_name].value_counts().index result = pd.DataFrame( {column_name: uniques_values, "count_percentage": count_percentage} ) fig = px.bar(result, x=column_name, y="count_percentage") fig.update_layout(autosize=False, width=400, height=400) fig.update_traces(marker=dict(color=color_name)) fig.show() count_percentage_plot_by_column("log_level", "aqua") count_percentage_plot_by_column("code", "pink") count_percentage_plot_by_column("method", "olive") count_percentage_plot_by_column("Price", "midnightblue") count_percentage_plot_by_column("gwStatus", "lightseagreen") count_percentage_plot_by_column("planName", "purple") count_percentage_plot_by_column("received_at_year", "peru") count_percentage_plot_by_column("log_type", "orchid") count_percentage_plot_by_column("response_status", "gold") count_percentage_plot_by_column("status", "darkorange") count_percentage_plot_by_column("paymentType", "lime") count_percentage_plot_by_column("gwCode", "deeppink") count_percentage_plot_by_column("call_type", "turquoise") count_percentage_plot_by_column("api_type", "plum") # ### Loss Money For ERROR By Provider Name def loss_money_by_provider(pname): if pname not in df["providerName"].unique(): raise Exception( f"This provider name {pname} doesn't exist in the provider names list." ) lost_money = df[(df["log_level"] == "ERROR") & (df["providerName"] == pname)][ "Price" ].sum() fig = go.Figure() fig.add_trace( go.Indicator( mode="number+delta", value=lost_money, domain={"row": 0, "column": 1} ) ) fig.update_layout( autosize=False, width=400, height=400, title={ "text": f"Loss money for {pname}", "y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top", }, ) fig.show() loss_money_by_provider("provider_186") # ### Notify providers who have a Gold plan but *don't serve well*. # ERROR AND GOLD PLAN error_and_goldplan = round( df[(df["log_level"] == "ERROR") & (df["planName"] == "Gold")].shape[0], 2 ) # ALL GOLD PLAN all_goldplan = df[(df["planName"] == "Gold")].shape[0] # We can see the percentage of failed transactions in the Gold plan. It is almost 8% round(error_and_goldplan / all_goldplan * 100, 2) # Because of the 92% success rate in the Gold plan, we can claim that the problem is on the provider side and **not** on the API management side. # Now let's check whether we have a badly performing provider with a huge amount of failed transactions on a Gold plan. df[(df["log_level"] == "ERROR") & (df["planName"] == "Gold")][ "providerName" ].value_counts() # The worst one is **provider_186**, followed by provider_410, provider_279 and provider_376. We should notify them to solve the problem. # ## Outliers # ### providerResponse_Time df["providerResponse_Time"].plot(kind="box", figsize=(10, 10)) # In a real case, a **providerResponse_Time** of more than **10 seconds** (an intuition-based threshold) would be considered an **outlier**. We should do diagnostic analytics for the outliers and notify providers. # But that's not done here because we have mock data. df[df["providerResponse_Time"] > 10].shape[0] # ### response_Time df["response_Time"].plot(kind="box", figsize=(10, 10)) # In a real case, a **response_Time** of more than **20 seconds** (an intuition-based threshold) would be considered an **outlier**. We should do diagnostic analytics for the outliers and notify providers. # But that's not done here because we have mock data. # ### Relations df.columns df.corr()
false
0
2,528
0
2,528
2,528
129893160
# Giới thiệu về Kaggle # Kaggle là một nền tảng trực tuyến cho cộng đồng Machine Learning (ML) và Khoa học dữ liệu. Kaggle cho phép người dùng chia sẻ, tìm kiếm các bộ dữ liệu; tìm hiểu và xây dựng models, tương tác với những nhà khoa học và kỹ sư ML trên toàn thế giới; tham gia các cuộc thi để có cơ hội chiến thắng những giải thưởng giá trị. Người dùng Kaggle sẽ được hỗ trợ Graphic Processing Unit (GPU) và gần đây có thêm Tensor Processing Unit (TPU) để tăng tốc độ tính toán trong quá trình training cũng như inference. # * [GPU](https://www.kaggle.com/docs/efficient-gpu-usage): # - NVIDIA K80 GPUs hỗ trợ tăng tốc độ training của mô hình deep learning lên đến 12.5 lần. # - Quota: 30 giờ/tuần # * [TPU](https://cloud.google.com/tpu/docs/tpus) # - TPUs là sản phẩm của Google để tăng tốc độ làm việc của các mô hình machiine learning, được support ở Tensorflow 2.1 (cả Keras high-level API và models sử dụng training loop tuỳ chỉnh) # - Quota: 30 giờ/tuần và tối đa 3h trong một lần chạy. # ## Competitions (Các cuộc thi) # Đây là nơi mà bạn có thể tham gia các cuộc thi hoặc tổ chức cuộc thi. Để tham gia một cuộc thi có giải thưởng hấp dẫn, click vào cuộc thi đó, đọc mô tả về cuộc thi và nhấn `Join Competition` ngay dưới cover (bên góc phải). # Ngoài ra, bạn cũng có thể tổ chức cuộc thi trên Kaggle cho lớp học của bạn bằng cách truy cập vào đường [link này](https://www.kaggle.com/c/about/inclass/overview) và làm theo hướng dẫn rất cụ thể từ Kaggle. # ## Data # 4 hình thức để tạo Dataset: # * Upload từ máy tính của bạn # * Chia sẻ qua đường link # * Chia sẻ từ Github repository # * Embed từ những kernel khác trên Kaggle # ## Notebooks # Đây là tính năng đáng chú ý nhất của Kaggle và bạn có thể tận dụng được tài nguyên tính toán miễn phí được hỗ trợ (TPUs và GPUs) như được nhắc đến ở trên để huấn luyện mô hình. # ### Khởi tạo Notebooks # Có 4 cách chính để khởi tạo Notebooks trên Kaggle: # **(1) Tạo Notebook trống**: Click vào thẻ `Notebooks` trên `Menu sidebar` trên **Profile** của bạn (nhấn vào avatar ở gốc phải trên cùng, chọn `My Profile`), chọn `New Notebook`. # **(2) Tạo Notebook từ Dataset**: Trong một dataset bất kỳ (đã được giới thiệu ở mục **Data**), click chọn `New Noteboook` ở dưới phải của ảnh cover. # **(3) Tạo Notebook từ Competition**: Sau khi Join Competition, người dùng có thể tạo Notebook từ Competition đó bằng cách nhấn vào thẻ `Notebooks` trong competition rồi chọn `New Notebook` # **(4) Fork từ một Notebook khác**: bạn có thể Fork (copy) một Notebook của bạn hoặc của người khác chia sẻ để tạo một Notebook mới. Notebook này sẽ copy hết toàn bộ nội dung (code, text, v.v.) và kết nối với dữ liệu của Notebook cũ. Data này bạn có thể thấy ở bên phải màn hình. Từ Notebook mới người dùng có thể điều chỉnh lại code và lưu lại. # ### Thay đổi cài đặt cho Notebook # Người dùng có thể thay đổi cài đặt cho Notebook với những options ở bên phải màn hình như bên dưới: # Hình 4 # Ở đây, bạn có thể thay đổi Ngôn ngữ lập trình, Môi trường (giữ cố định version của các packages [recommended] hoặc luôn update packages), Accelerator: None | GPU | TPU v3-8; Internet # **Lưu ý:** # * Quota của GPU và TPU v3-8 đã được nhắc đến ở phần mở đầu. # * TPU v3-8 chỉ sử dụng được khi bật Internet ở chế độ `On`. Ở một số cuộc thi trên Kaggle không cho phép submit Notebook cho phép Internet hoạt động. Bạn có thể dùng TPU v3-8 để train, còn khi submit để inference thì không sử dụng được đối với những cuộc thi này. 
Để sử dụng TPU (chỉ support cho Tensorflow 2.1), bạn cần thêm một bước để phát hiện và khởi tạo TPU như [Tutorial rất chi tiết này của Kaggle](https://www.kaggle.com/docs/tpu). # ### Thêm data cho hoặc xoá data khỏi Notebook # **Thêm data** # Ở bên phải màn hình chọn `+ Add data`, ở đây bạn có thể thêm data từ Datasets, Competition Data, Kernel Output Files (Output của Notebook) của chính bạn hoặc của người khác chia sẻ. # **Xoá data** # Click vào dấu `x` ngay cạnh tên data bạn muốn xoá # ### Import data và lưu data trên Kaggle # * Input: `/kaggle/input` # * Output: # + `/kaggle/working`: output lưu ở đây sẽ được lưu lại để sử dụng cho những lần sau khi bạn tạo một version sử dụng `Save & Run All`. Tuy nhiên, quota cho output là 5GB, bạn không thể lưu output của Notebook vượt quá dung lượng này. # + `/kaggle/temp`: output lưu ở đây chỉ sử dụng được trong nội bộ session đang chạy. Nếu lưu và thoát ra thì sẽ biến mất. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # List /kaggle/input để xem các files và folders dữ liệu được liên kết với Notebook # Import data (giống như Jupyter Notebook, bạn có thể sử dụng tab để auto-complete đường dẫn) data = pd.read_csv("/kaggle/input/dataisbeautiful/r_dataisbeautiful_posts.csv") data.head(3) # Tạo ổ nhớ tạm os.makedirs("/kaggle/temp", exist_ok=True) # Lưu data vào ổ nhớ tạm, file này sẽ không được lưu xuất hiện sau khi bạn lưu và commit notebook data[:3].to_csv("/kaggle/temp/temp.csv", index=False) # Đọc data đã lưu từ ổ nhớ tạm pd.read_csv("/kaggle/temp/temp.csv") # Lưu data để sử dụng cho những version hay notebook sau, file `submission.csv` sẽ lưu giữ sau khi Notebook được commit # data[:3].to_csv("/kaggle/working/submission.csv",index=False)
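The note above says that using a TPU v3-8 requires an extra detection and initialisation step (see the linked Kaggle TPU docs). The snippet below is a hedged sketch of the usual TensorFlow pattern for that step, with a CPU/GPU fallback; the exact strategy class name varies between TensorFlow releases, so treat it as illustrative rather than the notebook's own code.

```python
import tensorflow as tf

try:
    # On Kaggle the resolver auto-detects the attached TPU.
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    # TF 2.1 exposes this as tf.distribute.experimental.TPUStrategy;
    # newer releases use tf.distribute.TPUStrategy instead.
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    # No TPU attached: fall back to the default (CPU/GPU) strategy.
    strategy = tf.distribute.get_strategy()

print("Number of replicas:", strategy.num_replicas_in_sync)
```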
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/893/129893160.ipynb
null
null
[{"Id": 129893160, "ScriptId": 38636527, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14853191, "CreationDate": "05/17/2023 08:52:41", "VersionNumber": 1.0, "Title": "T\u00ecm hi\u1ec3u Kaggle", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 121.0, "LinesInsertedFromPrevious": 121.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# Giới thiệu về Kaggle # Kaggle là một nền tảng trực tuyến cho cộng đồng Machine Learning (ML) và Khoa học dữ liệu. Kaggle cho phép người dùng chia sẻ, tìm kiếm các bộ dữ liệu; tìm hiểu và xây dựng models, tương tác với những nhà khoa học và kỹ sư ML trên toàn thế giới; tham gia các cuộc thi để có cơ hội chiến thắng những giải thưởng giá trị. Người dùng Kaggle sẽ được hỗ trợ Graphic Processing Unit (GPU) và gần đây có thêm Tensor Processing Unit (TPU) để tăng tốc độ tính toán trong quá trình training cũng như inference. # * [GPU](https://www.kaggle.com/docs/efficient-gpu-usage): # - NVIDIA K80 GPUs hỗ trợ tăng tốc độ training của mô hình deep learning lên đến 12.5 lần. # - Quota: 30 giờ/tuần # * [TPU](https://cloud.google.com/tpu/docs/tpus) # - TPUs là sản phẩm của Google để tăng tốc độ làm việc của các mô hình machiine learning, được support ở Tensorflow 2.1 (cả Keras high-level API và models sử dụng training loop tuỳ chỉnh) # - Quota: 30 giờ/tuần và tối đa 3h trong một lần chạy. # ## Competitions (Các cuộc thi) # Đây là nơi mà bạn có thể tham gia các cuộc thi hoặc tổ chức cuộc thi. Để tham gia một cuộc thi có giải thưởng hấp dẫn, click vào cuộc thi đó, đọc mô tả về cuộc thi và nhấn `Join Competition` ngay dưới cover (bên góc phải). # Ngoài ra, bạn cũng có thể tổ chức cuộc thi trên Kaggle cho lớp học của bạn bằng cách truy cập vào đường [link này](https://www.kaggle.com/c/about/inclass/overview) và làm theo hướng dẫn rất cụ thể từ Kaggle. # ## Data # 4 hình thức để tạo Dataset: # * Upload từ máy tính của bạn # * Chia sẻ qua đường link # * Chia sẻ từ Github repository # * Embed từ những kernel khác trên Kaggle # ## Notebooks # Đây là tính năng đáng chú ý nhất của Kaggle và bạn có thể tận dụng được tài nguyên tính toán miễn phí được hỗ trợ (TPUs và GPUs) như được nhắc đến ở trên để huấn luyện mô hình. # ### Khởi tạo Notebooks # Có 4 cách chính để khởi tạo Notebooks trên Kaggle: # **(1) Tạo Notebook trống**: Click vào thẻ `Notebooks` trên `Menu sidebar` trên **Profile** của bạn (nhấn vào avatar ở gốc phải trên cùng, chọn `My Profile`), chọn `New Notebook`. # **(2) Tạo Notebook từ Dataset**: Trong một dataset bất kỳ (đã được giới thiệu ở mục **Data**), click chọn `New Noteboook` ở dưới phải của ảnh cover. # **(3) Tạo Notebook từ Competition**: Sau khi Join Competition, người dùng có thể tạo Notebook từ Competition đó bằng cách nhấn vào thẻ `Notebooks` trong competition rồi chọn `New Notebook` # **(4) Fork từ một Notebook khác**: bạn có thể Fork (copy) một Notebook của bạn hoặc của người khác chia sẻ để tạo một Notebook mới. Notebook này sẽ copy hết toàn bộ nội dung (code, text, v.v.) và kết nối với dữ liệu của Notebook cũ. Data này bạn có thể thấy ở bên phải màn hình. Từ Notebook mới người dùng có thể điều chỉnh lại code và lưu lại. # ### Thay đổi cài đặt cho Notebook # Người dùng có thể thay đổi cài đặt cho Notebook với những options ở bên phải màn hình như bên dưới: # Hình 4 # Ở đây, bạn có thể thay đổi Ngôn ngữ lập trình, Môi trường (giữ cố định version của các packages [recommended] hoặc luôn update packages), Accelerator: None | GPU | TPU v3-8; Internet # **Lưu ý:** # * Quota của GPU và TPU v3-8 đã được nhắc đến ở phần mở đầu. # * TPU v3-8 chỉ sử dụng được khi bật Internet ở chế độ `On`. Ở một số cuộc thi trên Kaggle không cho phép submit Notebook cho phép Internet hoạt động. Bạn có thể dùng TPU v3-8 để train, còn khi submit để inference thì không sử dụng được đối với những cuộc thi này. 
Để sử dụng TPU (chỉ support cho Tensorflow 2.1), bạn cần thêm một bước để phát hiện và khởi tạo TPU như [Tutorial rất chi tiết này của Kaggle](https://www.kaggle.com/docs/tpu). # ### Thêm data cho hoặc xoá data khỏi Notebook # **Thêm data** # Ở bên phải màn hình chọn `+ Add data`, ở đây bạn có thể thêm data từ Datasets, Competition Data, Kernel Output Files (Output của Notebook) của chính bạn hoặc của người khác chia sẻ. # **Xoá data** # Click vào dấu `x` ngay cạnh tên data bạn muốn xoá # ### Import data và lưu data trên Kaggle # * Input: `/kaggle/input` # * Output: # + `/kaggle/working`: output lưu ở đây sẽ được lưu lại để sử dụng cho những lần sau khi bạn tạo một version sử dụng `Save & Run All`. Tuy nhiên, quota cho output là 5GB, bạn không thể lưu output của Notebook vượt quá dung lượng này. # + `/kaggle/temp`: output lưu ở đây chỉ sử dụng được trong nội bộ session đang chạy. Nếu lưu và thoát ra thì sẽ biến mất. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # List /kaggle/input để xem các files và folders dữ liệu được liên kết với Notebook # Import data (giống như Jupyter Notebook, bạn có thể sử dụng tab để auto-complete đường dẫn) data = pd.read_csv("/kaggle/input/dataisbeautiful/r_dataisbeautiful_posts.csv") data.head(3) # Tạo ổ nhớ tạm os.makedirs("/kaggle/temp", exist_ok=True) # Lưu data vào ổ nhớ tạm, file này sẽ không được lưu xuất hiện sau khi bạn lưu và commit notebook data[:3].to_csv("/kaggle/temp/temp.csv", index=False) # Đọc data đã lưu từ ổ nhớ tạm pd.read_csv("/kaggle/temp/temp.csv") # Lưu data để sử dụng cho những version hay notebook sau, file `submission.csv` sẽ lưu giữ sau khi Notebook được commit # data[:3].to_csv("/kaggle/working/submission.csv",index=False)
false
0
2,359
0
2,359
2,359
129970235
import numpy as np import pandas as pd import os from netCDF4 import Dataset data = Dataset( r"/kaggle/input/istanbul-era5-reanalysis-data-2022/adaptor.mars.internal-1683572405.9284267-22767-2-3b607d67-6460-4af3-9a2e-5b18003a18e3.nc", "r", ) data1 = Dataset( r"/kaggle/input/istanbul-era5-reanalysis-data-2022-2/adaptor.mars.internal-1683640761.811922-13030-8-47cadd82-dda7-4d14-b2d9-dee7d4e16434.nc", "r", ) empty = {} c = 0 for key in data.variables.keys(): if c > 2: empty[key] = data.variables[key][:, 2, 5] c += 1 df = pd.DataFrame(empty) datetimeindex = pd.date_range("2022-01-01", periods=8760, freq="1h") df.index = datetimeindex empty = {} c = 0 for key in data1.variables.keys(): if c > 2: empty[key] = data1.variables[key][:, 2, 5] c += 1 df1 = pd.DataFrame(empty) df1.index = datetimeindex df1["tp"] = df["tp"] df1.corr() high_corr = df1.columns[np.abs(df1.corr()["tp"]) > 0.5] df1 = df1[high_corr].copy() df1 high_corr = df.columns[np.abs(df.corr()["tp"]) > 0.5] df = df[high_corr].copy() # df.drop(['mvimd'],axis=1,inplace=True) df.drop(["tp"], axis=1, inplace=True) data = pd.concat([df, df1], axis=1) data.drop(["mtpr"], axis=1, inplace=True) # data['target'] = data['tp'].shift(-1) # data=data[:-1] "mlspr = mean large scale precipitation rate" "tclw = Total column cloud liquid water" "mxtpr = Maximum total precipitation rate since previous post-processing" "lcc = low cloud cover" "mvimd = Mean vertically integrated moisture divergence" "mcc = Medium cloud cover" "tcc = Total cloud cover" "tp = Total precipitation" from statsmodels.graphics.tsaplots import plot_acf plot_acf(data["tp"]) def scaler(data): for column in data.columns: if data[column].mean() > 1: data[column] = (data[column] - data[column].min()) / ( data[column].max() - data[column].min() ) return data scaler(data) def split_data(data, time_steps): Xs, Ys = [], [] for rownum in range(len(data)): end = rownum + time_steps if end < len(data): seqx = data[rownum:end] seqy = data["tp"][end] Xs.append(seqx) Ys.append(seqy) return np.array(Xs), np.array(Ys) step = 3 X, y = split_data(data, step) Xtrain, ytrain = X[:8000], y[:8000] Xtest, ytest = X[8000:], y[8000:] from tensorflow.keras import Sequential from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow as tf model = Sequential() model.add( LSTM(24, input_shape=(Xtrain.shape[1], Xtrain.shape[2]), return_sequences=True) ) model.add(LSTM(12, input_shape=(Xtrain.shape[1], Xtrain.shape[2]))) # model.add(LSTM(50, activation='relu')) # model.add(Dense(12,'relu')) model.add(Dense(1, "linear")) model.summary() with tf.device("/GPU:0"): model.compile(loss="mae", optimizer=Adam(learning_rate=0.0003)) model.fit(Xtrain, ytrain, epochs=50) pre = model.predict(Xtest) from sklearn.metrics import mean_absolute_error mean_absolute_error(ytest, pre) vis = pd.DataFrame(ytest) vis["pre"] = pre vis.columns = ["test", "pre"] vis[:500].plot() vis import xgboost as xgb data["target"] = data["tp"].shift(-1) data = data[:-1] x = data.iloc[:, :3] y = data["target"] xtrain, ytrain = x[:8000], y[:8000] xtest, ytest = x[8000:], y[8000:] with tf.device("/GPU:0"): xgb_regressor = xgb.XGBRegressor( eval_metric="mape", n_estimators=2000, learning_rate=0.1 ) xgb_regressor.fit(xtrain, ytrain) pre = xgb_regressor.predict(xtest) mean_absolute_error(ytest, pre) frame = pd.DataFrame(pd.Series(pre, index=ytest.index)) frame["test"] = ytest frame.columns = ["pre", "test"] frame[:500].plot()
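The `split_data` function above turns the hourly table into overlapping windows of `time_steps` rows, with the next hour's `tp` value as the target. The sketch below runs the same windowing on a small made-up frame (not the ERA5 NetCDF data) so the resulting shapes are easy to verify.

```python
import numpy as np
import pandas as pd

# Made-up stand-in for the hourly table: a target column "tp" plus one feature.
toy = pd.DataFrame({
    "tp": np.arange(10, dtype=float),
    "lcc": np.arange(10, dtype=float) * 0.1,
})

def split_data(data, time_steps):
    # Same windowing as the notebook: each sample is `time_steps` consecutive rows,
    # and the label is the "tp" value immediately after that window.
    Xs, Ys = [], []
    for rownum in range(len(data)):
        end = rownum + time_steps
        if end < len(data):
            Xs.append(data[rownum:end])
            Ys.append(data["tp"][end])
    return np.array(Xs), np.array(Ys)

X, y = split_data(toy, 3)
print(X.shape, y.shape)  # (7, 3, 2): 7 windows of 3 timesteps x 2 features; 7 targets
print(y[:3])             # [3. 4. 5.] -> tp one step after each window
```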
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970235.ipynb
null
null
[{"Id": 129970235, "ScriptId": 38350548, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11237154, "CreationDate": "05/17/2023 20:01:53", "VersionNumber": 3.0, "Title": "LSTM-XGBoost Precipitation Nowcast", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 143.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 128.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd import os from netCDF4 import Dataset data = Dataset( r"/kaggle/input/istanbul-era5-reanalysis-data-2022/adaptor.mars.internal-1683572405.9284267-22767-2-3b607d67-6460-4af3-9a2e-5b18003a18e3.nc", "r", ) data1 = Dataset( r"/kaggle/input/istanbul-era5-reanalysis-data-2022-2/adaptor.mars.internal-1683640761.811922-13030-8-47cadd82-dda7-4d14-b2d9-dee7d4e16434.nc", "r", ) empty = {} c = 0 for key in data.variables.keys(): if c > 2: empty[key] = data.variables[key][:, 2, 5] c += 1 df = pd.DataFrame(empty) datetimeindex = pd.date_range("2022-01-01", periods=8760, freq="1h") df.index = datetimeindex empty = {} c = 0 for key in data1.variables.keys(): if c > 2: empty[key] = data1.variables[key][:, 2, 5] c += 1 df1 = pd.DataFrame(empty) df1.index = datetimeindex df1["tp"] = df["tp"] df1.corr() high_corr = df1.columns[np.abs(df1.corr()["tp"]) > 0.5] df1 = df1[high_corr].copy() df1 high_corr = df.columns[np.abs(df.corr()["tp"]) > 0.5] df = df[high_corr].copy() # df.drop(['mvimd'],axis=1,inplace=True) df.drop(["tp"], axis=1, inplace=True) data = pd.concat([df, df1], axis=1) data.drop(["mtpr"], axis=1, inplace=True) # data['target'] = data['tp'].shift(-1) # data=data[:-1] "mlspr = mean large scale precipitation rate" "tclw = Total column cloud liquid water" "mxtpr = Maximum total precipitation rate since previous post-processing" "lcc = low cloud cover" "mvimd = Mean vertically integrated moisture divergence" "mcc = Medium cloud cover" "tcc = Total cloud cover" "tp = Total precipitation" from statsmodels.graphics.tsaplots import plot_acf plot_acf(data["tp"]) def scaler(data): for column in data.columns: if data[column].mean() > 1: data[column] = (data[column] - data[column].min()) / ( data[column].max() - data[column].min() ) return data scaler(data) def split_data(data, time_steps): Xs, Ys = [], [] for rownum in range(len(data)): end = rownum + time_steps if end < len(data): seqx = data[rownum:end] seqy = data["tp"][end] Xs.append(seqx) Ys.append(seqy) return np.array(Xs), np.array(Ys) step = 3 X, y = split_data(data, step) Xtrain, ytrain = X[:8000], y[:8000] Xtest, ytest = X[8000:], y[8000:] from tensorflow.keras import Sequential from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow as tf model = Sequential() model.add( LSTM(24, input_shape=(Xtrain.shape[1], Xtrain.shape[2]), return_sequences=True) ) model.add(LSTM(12, input_shape=(Xtrain.shape[1], Xtrain.shape[2]))) # model.add(LSTM(50, activation='relu')) # model.add(Dense(12,'relu')) model.add(Dense(1, "linear")) model.summary() with tf.device("/GPU:0"): model.compile(loss="mae", optimizer=Adam(learning_rate=0.0003)) model.fit(Xtrain, ytrain, epochs=50) pre = model.predict(Xtest) from sklearn.metrics import mean_absolute_error mean_absolute_error(ytest, pre) vis = pd.DataFrame(ytest) vis["pre"] = pre vis.columns = ["test", "pre"] vis[:500].plot() vis import xgboost as xgb data["target"] = data["tp"].shift(-1) data = data[:-1] x = data.iloc[:, :3] y = data["target"] xtrain, ytrain = x[:8000], y[:8000] xtest, ytest = x[8000:], y[8000:] with tf.device("/GPU:0"): xgb_regressor = xgb.XGBRegressor( eval_metric="mape", n_estimators=2000, learning_rate=0.1 ) xgb_regressor.fit(xtrain, ytrain) pre = xgb_regressor.predict(xtest) mean_absolute_error(ytest, pre) frame = pd.DataFrame(pd.Series(pre, index=ytest.index)) frame["test"] = ytest frame.columns = ["pre", "test"] frame[:500].plot()
false
0
1,455
0
1,455
1,455
129970149
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # importing the libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) from sklearn.preprocessing import LabelEncoder import xgboost from sklearn.model_selection import RandomizedSearchCV # importing the test & train dataset test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") test_id = test["Id"] test.drop( columns=["Id"], axis=1, inplace=True ) # droping the id columns from the dataset test.shape train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) train.drop( columns=["Id"], axis=1, inplace=True ) # droping the id columns from the dataset train.shape # concating the tain and test dataset columns after removing the target column from train dataset. y_train = train["SalePrice"] n_train = train.shape[0] n_test = test.shape[0] df_train = pd.concat([train, test]).reset_index(drop=True) df_train.drop(columns=["SalePrice"], axis=0, inplace=True) # statstical view of dataset df_train.describe() # train data correlation with target variable cor = train.corr() corr = cor.sort_values(by=["SalePrice"], ascending=False) # correlation of the test dataset plt.figure(figsize=(30, 20)) sns.heatmap(corr, cmap=plt.cm.CMRmap_r, annot=True) plt.show() df_train.info() # null value analysis on the data set sns.heatmap(df_train.isna(), yticklabels=False, cbar=False, cmap="viridis") # df_train data correlation cor = df_train.corr() # correlation of the test dataset plt.figure(figsize=(30, 20)) sns.heatmap(cor, cmap=plt.cm.CMRmap_r, annot=True) plt.show() # frequency for each df_train dataset shown by lineplot for i in df_train.columns: plt.figure(figsize=(14, 4)) sns.lineplot(df_train[i]) plt.show() # saleprice outlier sns.boxplot(train["SalePrice"]) import warnings warnings.filterwarnings("ignore") # skewness and qq plot of the dataset train_ske = df_train[ [ "MSSubClass", "LotArea", "OverallQual", "OverallCond", "YearBuilt", "YearRemodAdd", "1stFlrSF", "2ndFlrSF", "LowQualFinSF", "GrLivArea", "FullBath", "HalfBath", "BedroomAbvGr", "KitchenAbvGr", "TotRmsAbvGrd", "Fireplaces", "WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch", "ScreenPorch", "PoolArea", "MiscVal", "MoSold", "YrSold", ] ] from scipy import stats for col in train_ske.columns: plt.figure(figsize=(14, 4)) plt.subplot(121) sns.distplot(train_ske[col]) plt.title(col) plt.subplot(122) stats.probplot(train_ske[col], dist="norm", plot=plt) plt.title(col) plt.show() # Automated library for the suggestions of Datacleaning from autoviz import data_cleaning_suggestions data_cleaning_suggestions(train)
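The script above inspects skewness, kurtosis and QQ plots for the numeric columns. A common follow-up for a heavily right-skewed target such as SalePrice is a log transform; the hedged sketch below uses synthetic lognormal data (not the competition file) to show how the same skew/kurtosis/probplot diagnostics respond to `np.log1p`.

```python
import numpy as np
import pandas as pd
from scipy import stats

# Synthetic right-skewed "price-like" series, only for illustration.
rng = np.random.default_rng(0)
price = pd.Series(rng.lognormal(mean=12, sigma=0.5, size=1460))

print("raw    skew %.2f  kurtosis %.2f" % (price.skew(), price.kurt()))
logged = np.log1p(price)
print("log1p  skew %.2f  kurtosis %.2f" % (logged.skew(), logged.kurt()))

# scipy's probplot (used in the notebook) quantifies the same thing:
# the r-value of the fit to the normal QQ line rises after the transform.
_, (_, _, r_raw) = stats.probplot(price, dist="norm")
_, (_, _, r_log) = stats.probplot(logged, dist="norm")
print("QQ-line fit r: raw %.3f  log1p %.3f" % (r_raw, r_log))
```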
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970149.ipynb
null
null
[{"Id": 129970149, "ScriptId": 38658539, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9525730, "CreationDate": "05/17/2023 20:00:44", "VersionNumber": 1.0, "Title": "notebook667a6c0bf6", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 111.0, "LinesInsertedFromPrevious": 111.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # importing the libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) from sklearn.preprocessing import LabelEncoder import xgboost from sklearn.model_selection import RandomizedSearchCV # importing the test & train dataset test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") test_id = test["Id"] test.drop( columns=["Id"], axis=1, inplace=True ) # droping the id columns from the dataset test.shape train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) train.drop( columns=["Id"], axis=1, inplace=True ) # droping the id columns from the dataset train.shape # concating the tain and test dataset columns after removing the target column from train dataset. y_train = train["SalePrice"] n_train = train.shape[0] n_test = test.shape[0] df_train = pd.concat([train, test]).reset_index(drop=True) df_train.drop(columns=["SalePrice"], axis=0, inplace=True) # statstical view of dataset df_train.describe() # train data correlation with target variable cor = train.corr() corr = cor.sort_values(by=["SalePrice"], ascending=False) # correlation of the test dataset plt.figure(figsize=(30, 20)) sns.heatmap(corr, cmap=plt.cm.CMRmap_r, annot=True) plt.show() df_train.info() # null value analysis on the data set sns.heatmap(df_train.isna(), yticklabels=False, cbar=False, cmap="viridis") # df_train data correlation cor = df_train.corr() # correlation of the test dataset plt.figure(figsize=(30, 20)) sns.heatmap(cor, cmap=plt.cm.CMRmap_r, annot=True) plt.show() # frequency for each df_train dataset shown by lineplot for i in df_train.columns: plt.figure(figsize=(14, 4)) sns.lineplot(df_train[i]) plt.show() # saleprice outlier sns.boxplot(train["SalePrice"]) import warnings warnings.filterwarnings("ignore") # skewness and qq plot of the dataset train_ske = df_train[ [ "MSSubClass", "LotArea", "OverallQual", "OverallCond", "YearBuilt", "YearRemodAdd", "1stFlrSF", "2ndFlrSF", "LowQualFinSF", "GrLivArea", "FullBath", "HalfBath", "BedroomAbvGr", "KitchenAbvGr", "TotRmsAbvGrd", "Fireplaces", "WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch", "ScreenPorch", "PoolArea", "MiscVal", "MoSold", "YrSold", ] ] from scipy import stats for col in train_ske.columns: plt.figure(figsize=(14, 4)) plt.subplot(121) sns.distplot(train_ske[col]) plt.title(col) plt.subplot(122) stats.probplot(train_ske[col], dist="norm", plot=plt) plt.title(col) plt.show() # Automated library for the suggestions of Datacleaning from autoviz import data_cleaning_suggestions data_cleaning_suggestions(train)
false
0
1,118
0
1,118
1,118
129970011
<jupyter_start><jupyter_text>Ocular Disease Recognition # About this Data &gt; Ocular Disease Intelligent Recognition (ODIR) is a structured ophthalmic database of 5,000 patients with age, color fundus photographs from left and right eyes and doctors' diagnostic keywords from doctors. &gt; This dataset is meant to represent ‘‘real-life’’ set of patient information collected by Shanggong Medical Technology Co., Ltd. from different hospitals/medical centers in China. In these institutions, fundus images are captured by various cameras in the market, such as Canon, Zeiss and Kowa, resulting into varied image resolutions. Annotations were labeled by trained human readers with quality control management. They classify patient into eight labels including: - Normal (N), - Diabetes (D), - Glaucoma (G), - Cataract (C), - Age related Macular Degeneration (A), - Hypertension (H), - Pathological Myopia (M), - Other diseases/abnormalities (O) ## License &gt; License was not specified on source ## Splash Image &gt; Image from <a href="https://pixabay.com/pt/users/Matryx-15948447/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5061291">Omni Matryx</a> by <a href="https://pixabay.com/pt/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5061291">Pixabay</a> Kaggle dataset identifier: ocular-disease-recognition-odir5k <jupyter_script>import numpy as np import cv2 import os import pandas as pd from random import sample import seaborn as sns import matplotlib.pyplot as plt from scikitplot.metrics import plot_confusion_matrix as plt_con_mat from keras.utils.np_utils import to_categorical from sklearn.model_selection import train_test_split from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Conv2D, Dense, Dropout, MaxPooling2D, Flatten from keras.utils import plot_model path = "../input/ocular-disease-recognition-odir5k" df = pd.read_csv(os.path.join(path, "full_df.csv")) df.head() file_names = [] labels = [] for text, label, file_name in zip( df["Left-Diagnostic Keywords"], df["C"], df["Left-Fundus"] ): if ("cataract" in text) and (label == 1): file_names.append(file_name) labels.append(1) elif ("normal fundus" in text) and (label == 0): file_names.append(file_name) labels.append(0) for text, label, file_name in zip( df["Right-Diagnostic Keywords"], df["C"], df["Right-Fundus"] ): if ("cataract" in text) and (label == 1): file_names.append(file_name) labels.append(1) elif ("normal fundus" in text) and (label == 0): file_names.append(file_name) labels.append(0) print(len(file_names), len(labels)) plt.bar( [0, 1], [len([i for i in labels if i == 1]), len([i for i in labels if i == 0])], color=["r", "g"], ) plt.xticks([0, 1], ["Cataract", "Normal"]) plt.show() ROW = 224 COL = 224 image_data = [] for idx, image_name in enumerate(file_names): img = cv2.imread(os.path.join(path, "preprocessed_images", image_name)) try: img = cv2.resize(img, (ROW, COL)) image_data.append(img) except: del labels[idx] image_data = np.array(image_data) print(image_data.shape) temp = [] for idx, label in enumerate(labels): if label == 0: temp.append(idx) temp = sample(temp, len([label for label in labels if label == 1])) X_data = [] y_data = [] for idx in temp: X_data.append(image_data[idx]) y_data.append(labels[idx]) temp = [] for idx, label in enumerate(labels): if label == 1: temp.append(idx) for idx in temp: X_data.append(image_data[idx]) y_data.append(labels[idx]) X_data = np.array(X_data) y_data = np.array(y_data) y_data = np.expand_dims(y_data, axis=-1) y_data = to_categorical(y_data) print(X_data.shape, y_data.shape) X_train, X_test, y_train, y_test = train_test_split( X_data, y_data, test_size=0.2, shuffle=True, random_state=1 ) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) c = 0 n = 0 cataract_images = [] normal_images = [] for idx, label in enumerate(y_data): if n <= 5 and np.argmax(label) == 0: normal_images.append(idx) n += 1 elif c <= 5: cataract_images.append(idx) c += 1 if n == 5 and c == 5: break fig, ax = plt.subplots(5, 2, figsize=(20, 20)) ax[0, 0].title.set_text("Cataract") ax[0, 1].title.set_text("Normal") for i in range(5): ax[i, 0].imshow(X_data[cataract_images[i]]) ax[i, 0].axis("off") ax[i, 1].imshow(X_data[normal_images[i]]) ax[i, 1].axis("off") plt.show()
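Before the train/test split, the script above balances the classes by keeping every cataract image and drawing an equally sized random subset of the normal ones with `random.sample`. The sketch below shows the effect of that under-sampling step on fake labels invented for illustration.

```python
import numpy as np
from random import sample, seed

# Fake labels (1 = cataract, 0 = normal), only to illustrate the balancing step:
# keep all positives and draw an equal-sized random subset of the negatives.
seed(0)
labels = [0] * 90 + [1] * 10

pos_idx = [i for i, y in enumerate(labels) if y == 1]
neg_idx = sample([i for i, y in enumerate(labels) if y == 0], len(pos_idx))

balanced = [labels[i] for i in neg_idx + pos_idx]
print(len(balanced), np.bincount(balanced))  # 20 samples, counts [10 10] -> balanced
```

One caveat in the original loop is worth noting: deleting from `labels` by index while enumerating `file_names` shifts the remaining indices after the first failed image read, so collecting the kept image/label pairs into fresh lists is the safer pattern.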
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970011.ipynb
ocular-disease-recognition-odir5k
andrewmvd
[{"Id": 129970011, "ScriptId": 38653563, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15043471, "CreationDate": "05/17/2023 19:58:49", "VersionNumber": 1.0, "Title": "Cataract Detection", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 135.0, "LinesInsertedFromPrevious": 135.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186409168, "KernelVersionId": 129970011, "SourceDatasetVersionId": 1512919}]
[{"Id": 1512919, "DatasetId": 611716, "DatasourceVersionId": 1547166, "CreatorUserId": 793761, "LicenseName": "Other (specified in description)", "CreationDate": "09/24/2020 16:26:29", "VersionNumber": 2.0, "Title": "Ocular Disease Recognition", "Slug": "ocular-disease-recognition-odir5k", "Subtitle": "Right and left eye fundus photographs of 5000 patients", "Description": "# About this Data\n&gt; Ocular Disease Intelligent Recognition (ODIR) is a structured ophthalmic database of 5,000 patients with age, color fundus photographs from left and right eyes and doctors' diagnostic keywords from doctors.\n\n&gt; This dataset is meant to represent \u2018\u2018real-life\u2019\u2019 set of patient information collected by Shanggong Medical Technology Co., Ltd. from different hospitals/medical centers in China. In these institutions, fundus images are captured by various cameras in the market, such as Canon, Zeiss and Kowa, resulting into varied image resolutions. \nAnnotations were labeled by trained human readers with quality control management. They classify patient into eight labels including:\n- Normal (N),\n- Diabetes (D),\n- Glaucoma (G),\n- Cataract (C),\n- Age related Macular Degeneration (A),\n- Hypertension (H),\n- Pathological Myopia (M),\n- Other diseases/abnormalities (O)\n\n\n## License\n&gt; License was not specified on source\n\n## Splash Image\n&gt; Image from <a href=\"https://pixabay.com/pt/users/Matryx-15948447/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5061291\">Omni Matryx</a> by <a href=\"https://pixabay.com/pt/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=5061291\">Pixabay</a>", "VersionNotes": "added preprocessed images for ease of use", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 611716, "CreatorUserId": 793761, "OwnerUserId": 793761.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1512919.0, "CurrentDatasourceVersionId": 1547166.0, "ForumId": 625778, "Type": 2, "CreationDate": "04/19/2020 19:19:40", "LastActivityDate": "04/19/2020", "TotalViews": 159950, "TotalDownloads": 20302, "TotalVotes": 330, "TotalKernels": 80}]
[{"Id": 793761, "UserName": "andrewmvd", "DisplayName": "Larxel", "RegisterDate": "11/15/2016", "PerformanceTier": 4}]
import numpy as np import cv2 import os import pandas as pd from random import sample import seaborn as sns import matplotlib.pyplot as plt from scikitplot.metrics import plot_confusion_matrix as plt_con_mat from keras.utils.np_utils import to_categorical from sklearn.model_selection import train_test_split from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Conv2D, Dense, Dropout, MaxPooling2D, Flatten from keras.utils import plot_model path = "../input/ocular-disease-recognition-odir5k" df = pd.read_csv(os.path.join(path, "full_df.csv")) df.head() file_names = [] labels = [] for text, label, file_name in zip( df["Left-Diagnostic Keywords"], df["C"], df["Left-Fundus"] ): if ("cataract" in text) and (label == 1): file_names.append(file_name) labels.append(1) elif ("normal fundus" in text) and (label == 0): file_names.append(file_name) labels.append(0) for text, label, file_name in zip( df["Right-Diagnostic Keywords"], df["C"], df["Right-Fundus"] ): if ("cataract" in text) and (label == 1): file_names.append(file_name) labels.append(1) elif ("normal fundus" in text) and (label == 0): file_names.append(file_name) labels.append(0) print(len(file_names), len(labels)) plt.bar( [0, 1], [len([i for i in labels if i == 1]), len([i for i in labels if i == 0])], color=["r", "g"], ) plt.xticks([0, 1], ["Cataract", "Normal"]) plt.show() ROW = 224 COL = 224 image_data = [] for idx, image_name in enumerate(file_names): img = cv2.imread(os.path.join(path, "preprocessed_images", image_name)) try: img = cv2.resize(img, (ROW, COL)) image_data.append(img) except: del labels[idx] image_data = np.array(image_data) print(image_data.shape) temp = [] for idx, label in enumerate(labels): if label == 0: temp.append(idx) temp = sample(temp, len([label for label in labels if label == 1])) X_data = [] y_data = [] for idx in temp: X_data.append(image_data[idx]) y_data.append(labels[idx]) temp = [] for idx, label in enumerate(labels): if label == 1: temp.append(idx) for idx in temp: X_data.append(image_data[idx]) y_data.append(labels[idx]) X_data = np.array(X_data) y_data = np.array(y_data) y_data = np.expand_dims(y_data, axis=-1) y_data = to_categorical(y_data) print(X_data.shape, y_data.shape) X_train, X_test, y_train, y_test = train_test_split( X_data, y_data, test_size=0.2, shuffle=True, random_state=1 ) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) c = 0 n = 0 cataract_images = [] normal_images = [] for idx, label in enumerate(y_data): if n <= 5 and np.argmax(label) == 0: normal_images.append(idx) n += 1 elif c <= 5: cataract_images.append(idx) c += 1 if n == 5 and c == 5: break fig, ax = plt.subplots(5, 2, figsize=(20, 20)) ax[0, 0].title.set_text("Cataract") ax[0, 1].title.set_text("Normal") for i in range(5): ax[i, 0].imshow(X_data[cataract_images[i]]) ax[i, 0].axis("off") ax[i, 1].imshow(X_data[normal_images[i]]) ax[i, 1].axis("off") plt.show()
false
0
1,154
1
1,574
1,154
129970292
<jupyter_start><jupyter_text>Used Car Auction Prices
### About the dataset
The dataset contains historical car auction sales prices, scraped from the outside internet sources. The dataset has been collected in 2015, and will not be updated.
Kaggle dataset identifier: used-car-auction-prices
<jupyter_script># # I. DATA SET AND PREPROCESSING
# I use a dataset from Kaggle for used car auction price prediction. The dataset contains various features that are required to predict and classify the range of prices of used cars.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import plotly
from scipy import stats
import warnings

warnings.filterwarnings("ignore")
# Libraries for ML
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

data = pd.read_csv(
    "/kaggle/input/used-car-auction-prices/car_prices.csv", on_bad_lines="skip"
)
print("row number: ", len(data))
print("column number: ", len(data.columns))
data.head(3)
data.info()
print("Most important features relative to selling price:")
corr = data.corr()
corr.sort_values(["sellingprice"], ascending=False, inplace=True)
print(corr.sellingprice)
data.describe()
data.isnull().sum()
# I don't need the columns "seller", "saledate" and "vin", since they don't influence the price.
# "mmr" does seem to influence the price a lot, but by its definition and correlation it is itself a variable that depends on every other feature, so I dropped it too.
data = data.dropna(how="any")
data.drop(columns=["vin", "seller", "saledate", "mmr"], inplace=True)
# Here is what I have as a result:
data.shape
data.hist(bins=50, figsize=(20, 15))
plt.show()
listtrain = data["make"]
# prints the missing in listtrain
print("Missing values in first list:", (set(listtrain).difference(listtrain)))
# There are values like "SUV" and "suv" in the dataset, so I convert all string occurrences to lower case.
data["transmission"].replace(["manual", "automatic"], [0, 1], inplace=True)
prev_unique = len(data["body"].unique())
for col in data.columns:
    if type(data[col][0]) is str:
        data[col] = data[col].apply(lambda x: x.lower())
curr_unique = len(data["body"].unique())
data.head(5)
print(prev_unique, curr_unique)
data.isnull().sum()
data.dtypes
data.describe()
# # III. EXPLORATORY DATA ANALYSIS
# After preprocessing the data, it is analyzed through visual exploration to gather insights about the model that can be applied to the data, and to understand the diversity of the data and the range of every field.
data.head(3)
# Now, let's check the price first.
sns.distplot(data["sellingprice"])
print("Skewness: %f" % data["sellingprice"].skew())
print("Kurtosis: %f" % data["sellingprice"].kurt())
# We can observe that the distribution of prices is highly positively skewed, with a long right tail (skew > 1). A kurtosis value of 12 is very high, meaning that there is a profusion of outliers in the dataset.
# applying log transformation
data["sellingprice"] = np.log(data["sellingprice"])
# transformed histogram and normal probability plot
sns.distplot(data["sellingprice"], fit=None)
fig = plt.figure()
res = stats.probplot(data["sellingprice"], plot=plt)
# I found that converting sellingprice to log(sellingprice) gives a more normal-looking distribution of the price; however, this transformation has no major or decisive effect on the results of the training and prediction procedure in the next section. Therefore, in order not to complicate matters, I decided to keep the database processed up to this step, analyze the parameters' correlations, and conduct the modeling in the following section.
# # MODEL DESCRIPTION
# To compute the price for vehicles, this platform may fit a linear regression model over a set of input variables. However, it does not give details on which features can be used for specific types of vehicles for such a prediction. I have taken the important features for predicting the price of used cars using random forest models.
# The author of another Jupyter notebook evaluates the performance of several classification methods (logistic regression, SVM, decision tree, Extra Trees, AdaBoost, random forest) on a similar dataset. Among all these models, the random forest classifier performs best for their prediction task.
# That work uses 11 features ('Cars', 'Location', 'Year', 'Kilometers_Driven', 'Fuel_Type', 'Transmission', 'Owner_Type', 'Mileage', 'Engine', 'Power', 'Seats') to perform the classification task after removal of irrelevant features from the dataset, which gives an accuracy of 96.2% on the test data. I also use a Kaggle dataset to perform prediction of used-car prices.
# **A. Data preparation & Model Parameters**
# In this notebook, I do not discuss the models' parameters in depth; I just applied the standard settings or followed previous recommendations. Let's copy the database.
import copy

df_train = copy.deepcopy(data)
cols = np.array(data.columns[data.dtypes != object])
for i in df_train.columns:
    if i not in cols:
        df_train[i] = df_train[i].map(str)
df_train.drop(columns=cols, inplace=True)
df_train.head(10)
# And then, encode the categorical parameters using LabelEncoder.
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict

# build dictionary function
cols = np.array(data.columns[data.dtypes != object])
d = defaultdict(LabelEncoder)
# only for categorical columns apply dictionary by calling fit_transform
df_train = df_train.apply(lambda x: d[x.name].fit_transform(x))
df_train[cols] = data[cols]
df_train.head(10)
# # Relationship of price with other parameters
print("Most important features relative to selling price:")
corr = df_train.corr()
corr.sort_values(["sellingprice"], ascending=False, inplace=True)
print(corr.sellingprice)
# # Training and Testing
# I split the dataset into training and testing data with a 75:25 split ratio (test_size=0.25 below). The split is made at random, which keeps the training and testing data representative of the whole dataset. This is done to avoid overfitting and enhance generalization.
ftrain = [
    "year",
    "make",
    "model",
    "trim",
    "body",
    "transmission",
    "state",
    "condition",
    "odometer",
    "color",
    "interior",
    "sellingprice",
]


def Definedata():
    # define dataset
    data2 = df_train[ftrain]
    X = data2.drop(columns=["sellingprice"]).values
    y0 = data2["sellingprice"].values
    lab_enc = preprocessing.LabelEncoder()
    y = lab_enc.fit_transform(y0)
    return X, y


from sklearn.ensemble import RandomForestRegressor

model = RandomForestRegressor()
X, y = Definedata()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
model.fit(X_train, y_train)
model.score(X_test, y_test)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970292.ipynb
used-car-auction-prices
tunguz
[{"Id": 129970292, "ScriptId": 38656187, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14589472, "CreationDate": "05/17/2023 20:02:25", "VersionNumber": 1.0, "Title": "notebookab50f2b013", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 175.0, "LinesInsertedFromPrevious": 175.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186409499, "KernelVersionId": 129970292, "SourceDatasetVersionId": 2246965}]
[{"Id": 2246965, "DatasetId": 1351279, "DatasourceVersionId": 2287865, "CreatorUserId": 417337, "LicenseName": "CC0: Public Domain", "CreationDate": "05/18/2021 17:49:43", "VersionNumber": 1.0, "Title": "Used Car Auction Prices", "Slug": "used-car-auction-prices", "Subtitle": "Used Car Auction Prices", "Description": "### About the dataset\n\nThe dataset contains historical car auction sales prices, scraped from the outside internet sources. The dataset has been collected in 2015, and will not be updated.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1351279, "CreatorUserId": 417337, "OwnerUserId": 417337.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2246965.0, "CurrentDatasourceVersionId": 2287865.0, "ForumId": 1370311, "Type": 2, "CreationDate": "05/18/2021 17:49:43", "LastActivityDate": "05/18/2021", "TotalViews": 42362, "TotalDownloads": 5240, "TotalVotes": 101, "TotalKernels": 13}]
[{"Id": 417337, "UserName": "tunguz", "DisplayName": "Bojan Tunguz", "RegisterDate": "09/11/2015", "PerformanceTier": 4}]
false
1
1,807
0
1,875
1,807
129970918
<jupyter_start><jupyter_text>Mlebourne 2016-2017
Kaggle dataset identifier: mlebourne-20162017
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

df = pd.read_csv("/kaggle/input/mlebourne-20162017/melb_data.csv")
df.head()
# # Exercise: fix the null values of the following columns
# # 1 - Car: replace the null values with the constant 0
# # 2 - YearBuilt: replace the null values with the median
# # 3 - BuildingArea: replace the null values with the mean grouped by the Rooms column
df.info()
df.Car.fillna(0, inplace=True)
df.info()
df["YearBuilt"].median()
df["YearBuilt"].fillna((df["YearBuilt"].median()), inplace=True)
df.head()
# fillna is used either with an assignment or with inplace=True, not both
# (the original combined the two, which assigns None to the column).
df["BuildingArea"] = df["BuildingArea"].fillna(
    df.groupby("Rooms")["BuildingArea"].transform("mean")
)
df.head()
df[df.Rooms == 10]
df.loc[11304, "BuildingArea"] = 600
df[df.Rooms == 10]
df.info()
# # Exercise: write a model that predicts house prices from the provided information, following the steps seen in the course.
X = df.loc[:, ["Landsize"]]
y = df.loc[:, "Price"]
X
y
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred = lin_reg.predict(X_test)
from sklearn.metrics import mean_squared_error
import numpy as np

mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print("RMSE : %0.2f" % (rmse))
from sklearn.tree import DecisionTreeRegressor

tree_reg = DecisionTreeRegressor(random_state=1122)
tree_reg.fit(X_train, y_train)
y_pred = tree_reg.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print("RMSE : %0.2f" % (rmse))
from sklearn.ensemble import RandomForestRegressor

forest_reg = RandomForestRegressor(n_estimators=100, random_state=7)
forest_reg.fit(X_train, y_train)
y_pred = forest_reg.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print("RMSE : %0.2f" % (rmse))
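# The exercise evaluates each regressor on a single hold-out split of one feature (Landsize).
# As an illustrative sketch (not part of the exercise), the same three models can be compared
# with 5-fold cross-validation; the fold count and scoring string are assumptions.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

models = {
    "linear": LinearRegression(),
    "tree": DecisionTreeRegressor(random_state=1122),
    "forest": RandomForestRegressor(n_estimators=100, random_state=7),
}
for name, reg in models.items():
    # this scoring string returns negated RMSE, so flip the sign for reporting
    scores = cross_val_score(reg, X, y, cv=5, scoring="neg_root_mean_squared_error")
    print("%s: RMSE %.2f (+/- %.2f)" % (name, -scores.mean(), scores.std()))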
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970918.ipynb
mlebourne-20162017
yan08042
[{"Id": 129970918, "ScriptId": 38115947, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13958568, "CreationDate": "05/17/2023 20:11:01", "VersionNumber": 1.0, "Title": "TP3 Data Science", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 76.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 186410280, "KernelVersionId": 129970918, "SourceDatasetVersionId": 2616660}]
[{"Id": 2616660, "DatasetId": 1590461, "DatasourceVersionId": 2660319, "CreatorUserId": 1947218, "LicenseName": "Unknown", "CreationDate": "09/15/2021 06:42:33", "VersionNumber": 3.0, "Title": "Mlebourne 2016-2017", "Slug": "mlebourne-20162017", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2021/09/15", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1590461, "CreatorUserId": 1947218, "OwnerUserId": 1947218.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2616660.0, "CurrentDatasourceVersionId": 2660319.0, "ForumId": 1610646, "Type": 2, "CreationDate": "09/15/2021 06:37:28", "LastActivityDate": "09/15/2021", "TotalViews": 1640, "TotalDownloads": 33, "TotalVotes": 1, "TotalKernels": 7}]
[{"Id": 1947218, "UserName": "yan08042", "DisplayName": "Cherry", "RegisterDate": "05/28/2018", "PerformanceTier": 0}]
false
1
790
2
831
790
129970729
<jupyter_start><jupyter_text>Personal Key Indicators of Heart Disease # Key Indicators of Heart Disease ## 2020 annual CDC survey data of 400k adults related to their health status ### What topic does the dataset cover? According to the [CDC](https://www.cdc.gov/heartdisease/risk_factors.htm), heart disease is one of the leading causes of death for people of most races in the US (African Americans, American Indians and Alaska Natives, and white people). About half of all Americans (47%) have at least 1 of 3 key risk factors for heart disease: high blood pressure, high cholesterol, and smoking. Other key indicator include diabetic status, obesity (high BMI), not getting enough physical activity or drinking too much alcohol. Detecting and preventing the factors that have the greatest impact on heart disease is very important in healthcare. Computational developments, in turn, allow the application of machine learning methods to detect "patterns" from the data that can predict a patient's condition. ### Where did the dataset come from and what treatments did it undergo? Originally, the dataset come from the CDC and is a major part of the Behavioral Risk Factor Surveillance System (BRFSS), which conducts annual telephone surveys to gather data on the health status of U.S. residents. As the [CDC](https://www.cdc.gov/heartdisease/risk_factors.htm) describes: "Established in 1984 with 15 states, BRFSS now collects data in all 50 states as well as the District of Columbia and three U.S. territories. BRFSS completes more than 400,000 adult interviews each year, making it the largest continuously conducted health survey system in the world.". The most recent dataset (as of February 15, 2022) includes data from 2020. It consists of 401,958 rows and 279 columns. The vast majority of columns are questions asked to respondents about their health status, such as "Do you have serious difficulty walking or climbing stairs?" or "Have you smoked at least 100 cigarettes in your entire life? [Note: 5 packs = 100 cigarettes]". In this dataset, I noticed many different factors (questions) that directly or indirectly influence heart disease, so I decided to select the most relevant variables from it and do some cleaning so that it would be usable for machine learning projects. ### What can you do with this dataset? As described above, the original dataset of nearly 300 variables was reduced to just about 20 variables. In addition to classical EDA, this dataset can be used to apply a range of machine learning methods, most notably classifier models (logistic regression, SVM, random forest, etc.). You should treat the variable "HeartDisease" as a binary ("Yes" - respondent had heart disease; "No" - respondent had no heart disease). But note that classes are not balanced, so the classic model application approach is not advisable. Fixing the weights/undersampling should yield significantly betters results. Based on the dataset, I constructed a logistic regression model and embedded it in an application you might be inspired by: https://share.streamlit.io/kamilpytlak/heart-condition-checker/main/app.py. Can you indicate which variables have a significant effect on the likelihood of heart disease? 
Kaggle dataset identifier: personal-key-indicators-of-heart-disease
<jupyter_script># tools used
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

heart = pd.read_csv("/kaggle/input/heart-disease-file/Heart_diease_dataset.csv")
# the cleaned data set is called cleaned_df
# # Cleaning and transforming the data
# 1. Check if there are missing values and fill or delete them
# 2. Convert non-numerical values into numbers
# 3. Split the data between x and y
# 1. Check if there are missing values
heart.isna().sum()
# 2. Need to change all the categoricals into numbers
# Checking the data types of each category
heart.dtypes
# Making the categoricals into numericals
# Since the presence of heart disease is the target, it is renamed to Target and changed into a numerical column
heart["Target"] = heart["HeartDisease"].replace({"Yes": 1, "No": 0}).astype(int)
dummies = pd.get_dummies(
    heart[["Smoking", "AlcoholDrinking", "Stroke", "DiffWalking", "Sex", "AgeCategory"]]
)
dummies
# combining the columns
numerical = ["Target", "BMI", "PhysicalHealth", "MentalHealth"]
cleaned_df = pd.concat([heart[numerical], dummies], axis=1)
cleaned_df.head()
# 3. Splitting up the data set
# The target y is the dependent variable; x holds the independent variables (the features, excluding the target)
x = cleaned_df.drop("Target", axis=1)
y = cleaned_df["Target"]
# Splitting up the dataset into the test set and train set, the test size will be 20%
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
# # RandomForestClassifier Analysis
# Import algorithm
from sklearn.ensemble import RandomForestClassifier

np.random.seed(42)
clf = RandomForestClassifier()
clf.fit(x_train, y_train)
# Check the score
clf.score(x_test, y_test)
# What it does: the model learns, from all of the features, which combinations of values are associated with the "target" being 1, i.e. what in the data is associated with heart disease
np.array(y_test)
# Comparing predictions to truth labels to evaluate the model
# this is the accuracy of the prediction model
y_preds = clf.predict(x_test)
np.mean(y_preds == y_test)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report

y_pred = clf.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()
report = classification_report(y_test, y_pred)
print("Classification Report:")
print(report)
# # Analysis
importances = clf.feature_importances_
sorted_indices = np.argsort(importances)
plt.figure(figsize=(8, 6))
sns.barplot(y=x_train.columns[sorted_indices], x=importances[sorted_indices])
plt.xlabel("Relative Importance")
plt.ylabel("Features")
plt.title("Feature Importances")
plt.show()
# # Predictor
name = input("What is your Name: ")
print("Hi", name, "!")
ageinput = input("How old are you?")
age = int(ageinput)
# One-hot encode the age into the 13 age-category dummy columns produced by
# pd.get_dummies (sorted from youngest to oldest). The original bucket bounds
# for 65+ overlapped and the fallback pointed at the 50-54 column; both fixed here.
if 18 <= age <= 24:
    age = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
elif 25 <= age <= 29:
    age = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
elif 30 <= age <= 34:
    age = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
elif 35 <= age <= 39:
    age = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
elif 40 <= age <= 44:
    age = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
elif 45 <= age <= 49:
    age = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
elif 50 <= age <= 54:
    age = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
elif 55 <= age <= 59:
    age = [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
elif 60 <= age <= 64:
    age = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
elif 65 <= age <= 69:
    age = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
elif 70 <= age <= 74:
    age = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]
elif 75 <= age <= 79:
    age = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]
else:
    age = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
sex = input("What is your sex?")
if sex.lower() == "male":
    sex_input = [0, 1]
elif sex.lower() == "female":
    sex_input = [1, 0]
else:
    print("Please enter male or female")
BMI = input("What is your BMI?")
BMI = int(BMI)
physicalhealth = input(
    "How many days within the last 30 days have you experienced physical illness and injury?"
)
physicalhealth = int(physicalhealth)
mentalhealth = input(
    "How many days within the last 30 days have you had issues with your mental health?"
)
mentalhealth = int(mentalhealth)
smoking = input("Do you smoke?")
if smoking.lower() == "yes":
    smoking_input = [0, 1]
elif smoking.lower() == "no":
    smoking_input = [1, 0]
else:
    print("Invalid smoking input.")
alcohol = input("Do you drink alcohol?")
if alcohol.lower() == "yes":
    alcohol_input = [0, 1]
elif alcohol.lower() == "no":
    alcohol_input = [1, 0]
else:
    print("Invalid alcohol input.")
stroke = input("Have you ever had a stroke?")
if stroke.lower() == "yes":
    stroke_input = [0, 1]
elif stroke.lower() == "no":
    stroke_input = [1, 0]
else:
    print("Invalid stroke input.")
diff = input("Do you have difficulty walking or climbing stairs?")
if diff.lower() == "yes":
    diff_input = [0, 1]
elif diff.lower() == "no":
    diff_input = [1, 0]
else:
    print("Invalid difficulty-walking input.")
# Assemble the feature vector in the same column order as cleaned_df / x_train.
input_values = [BMI]
input_values.append(physicalhealth)
input_values.append(mentalhealth)
input_values.extend(smoking_input)
input_values.extend(alcohol_input)
input_values.extend(stroke_input)
input_values.extend(diff_input)
input_values.extend(sex_input)
input_values.extend(age)
## predictor
new_input = [input_values]
predictions = clf.predict(new_input)
for prediction in predictions:
    if prediction == 0:
        print("It is likely you do not have heart disease.")
    else:
        print("It is likely you do/will have heart disease.")
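# The dataset description above notes that the HeartDisease classes are imbalanced and suggests
# class weighting or undersampling. The block below is an illustrative sketch (not part of the
# original notebook) of both options, reusing x_train/x_test/y_train/y_test from the cells above;
# the random seeds are arbitrary.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Option 1: weight classes inversely to their frequency.
clf_weighted = RandomForestClassifier(class_weight="balanced", random_state=42)
clf_weighted.fit(x_train, y_train)
print(classification_report(y_test, clf_weighted.predict(x_test)))

# Option 2: randomly undersample the majority (no-disease) class in the training split.
rng = np.random.default_rng(42)
pos_idx = np.where(y_train.values == 1)[0]
neg_idx = rng.choice(np.where(y_train.values == 0)[0], size=len(pos_idx), replace=False)
keep = np.concatenate([pos_idx, neg_idx])
clf_under = RandomForestClassifier(random_state=42)
clf_under.fit(x_train.iloc[keep], y_train.iloc[keep])
print(classification_report(y_test, clf_under.predict(x_test)))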
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970729.ipynb
personal-key-indicators-of-heart-disease
kamilpytlak
[{"Id": 129970729, "ScriptId": 38631168, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13695340, "CreationDate": "05/17/2023 20:08:29", "VersionNumber": 2.0, "Title": "heartdiseaseproject", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 198.0, "LinesInsertedFromPrevious": 177.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 21.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186410039, "KernelVersionId": 129970729, "SourceDatasetVersionId": 3191579}]
[{"Id": 3191579, "DatasetId": 1936563, "DatasourceVersionId": 3241234, "CreatorUserId": 9492796, "LicenseName": "CC0: Public Domain", "CreationDate": "02/16/2022 10:18:03", "VersionNumber": 2.0, "Title": "Personal Key Indicators of Heart Disease", "Slug": "personal-key-indicators-of-heart-disease", "Subtitle": "2020 annual CDC survey data of 400k adults related to their health status", "Description": "# Key Indicators of Heart Disease\n## 2020 annual CDC survey data of 400k adults related to their health status\n\n### What topic does the dataset cover?\nAccording to the [CDC](https://www.cdc.gov/heartdisease/risk_factors.htm), heart disease is one of the leading causes of death for people of most races in the US (African Americans, American Indians and Alaska Natives, and white people). About half of all Americans (47%) have at least 1 of 3 key risk factors for heart disease: high blood pressure, high cholesterol, and smoking. Other key indicator include diabetic status, obesity (high BMI), not getting enough physical activity or drinking too much alcohol. Detecting and preventing the factors that have the greatest impact on heart disease is very important in healthcare. Computational developments, in turn, allow the application of machine learning methods to detect \"patterns\" from the data that can predict a patient's condition.\n\n### Where did the dataset come from and what treatments did it undergo?\nOriginally, the dataset come from the CDC and is a major part of the Behavioral Risk Factor Surveillance System (BRFSS), which conducts annual telephone surveys to gather data on the health status of U.S. residents. As the [CDC](https://www.cdc.gov/heartdisease/risk_factors.htm) describes: \"Established in 1984 with 15 states, BRFSS now collects data in all 50 states as well as the District of Columbia and three U.S. territories. BRFSS completes more than 400,000 adult interviews each year, making it the largest continuously conducted health survey system in the world.\". The most recent dataset (as of February 15, 2022) includes data from 2020. It consists of 401,958 rows and 279 columns. The vast majority of columns are questions asked to respondents about their health status, such as \"Do you have serious difficulty walking or climbing stairs?\" or \"Have you smoked at least 100 cigarettes in your entire life? [Note: 5 packs = 100 cigarettes]\". In this dataset, I noticed many different factors (questions) that directly or indirectly influence heart disease, so I decided to select the most relevant variables from it and do some cleaning so that it would be usable for machine learning projects.\n\n### What can you do with this dataset?\nAs described above, the original dataset of nearly 300 variables was reduced to just about 20 variables. In addition to classical EDA, this dataset can be used to apply a range of machine learning methods, most notably classifier models (logistic regression, SVM, random forest, etc.). You should treat the variable \"HeartDisease\" as a binary (\"Yes\" - respondent had heart disease; \"No\" - respondent had no heart disease). But note that classes are not balanced, so the classic model application approach is not advisable. Fixing the weights/undersampling should yield significantly betters results. Based on the dataset, I constructed a logistic regression model and embedded it in an application you might be inspired by: https://share.streamlit.io/kamilpytlak/heart-condition-checker/main/app.py. 
Can you indicate which variables have a significant effect on the likelihood of heart disease?", "VersionNotes": "Data Update 2022/02/16", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1936563, "CreatorUserId": 9492796, "OwnerUserId": 9492796.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3191579.0, "CurrentDatasourceVersionId": 3241234.0, "ForumId": 1960316, "Type": 2, "CreationDate": "02/15/2022 19:28:49", "LastActivityDate": "02/15/2022", "TotalViews": 320603, "TotalDownloads": 46135, "TotalVotes": 694, "TotalKernels": 186}]
[{"Id": 9492796, "UserName": "kamilpytlak", "DisplayName": "Kamil Pytlak", "RegisterDate": "01/25/2022", "PerformanceTier": 1}]
false
1
2,260
0
3,093
2,260
129970751
<jupyter_start><jupyter_text>TP2Dataset
Kaggle dataset identifier: tp2dataset
<jupyter_script>import numpy as np
import pandas as pd

df = pd.read_csv("/kaggle/input/tp2dataset/Tweets.csv")
df.head()
# ## Show the tweets that have images as media
df[df.media == "Image"]
# ## Show the number of tweets per media type
df.groupby("media")["text"].count()
# ## Show the 7 most-shared tweets on 28/08/2019
df[df.date == "28/08/2019"].sort_values("nb_retweet", ascending=False).head(7)
# ## Show the 3 users with the most tweets
df.groupby("user")["text"].count().sort_values(ascending=False).head(3)
# ## Show the user whose tweets were shared the most in total
df.groupby("user")["nb_retweet"].sum().sort_values(ascending=False).head(1)
# ## The user whose tweets were shared the most on average
df.groupby("user")["nb_retweet"].mean().sort_values(ascending=False).head(1)
# ## The 3 users who post the longest tweets on average
df["long"] = df.text.apply(len)
df.groupby("user")["long"].mean().sort_values(ascending=False).head(3)
# ## Show the month with the most tweeted images
def mois(date):
    return date.split("/")[1]


df["mois"] = df.date.apply(mois)
df[df.media == "Image"].groupby("mois")["media"].count().sort_values(
    ascending=False
).head(1)
# ## Compare the number of tweets with and without a hashtag
df["hashtag"] = df.text.str.contains("#")
df.groupby("hashtag")["text"].count()
# ## In which week of the month (1, 2, 3, 4) do users tweet the most?
def semaine(date):
    j = int(date.split("/")[0])
    if j <= 7:
        return 1
    elif j <= 14:
        return 2
    elif j <= 21:
        return 3
    else:
        return 4


df["semaine"] = df.date.apply(semaine)
df.groupby("semaine")["nb_retweet"].count().sort_values(ascending=False).head()
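# The exercise derives the month and the week of the month by splitting the date string by hand.
# As an illustrative alternative (not part of the exercise), pandas can parse the dates once and
# derive both columns in a vectorised way; this assumes the day/month/year format seen in "28/08/2019".
import pandas as pd

dates = pd.to_datetime(df["date"], format="%d/%m/%Y")
df["mois"] = dates.dt.month
# days 1-7 -> week 1, 8-14 -> 2, 15-21 -> 3, 22 and later -> 4 (same buckets as semaine above)
df["semaine"] = ((dates.dt.day - 1) // 7 + 1).clip(upper=4)
df.groupby("semaine")["nb_retweet"].count().sort_values(ascending=False).head()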
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/970/129970751.ipynb
tp2dataset
ouahab7
[{"Id": 129970751, "ScriptId": 36107447, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13958568, "CreationDate": "05/17/2023 20:08:45", "VersionNumber": 1.0, "Title": "TP2 Data Science", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 72.0, "LinesInsertedFromPrevious": 72.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186410088, "KernelVersionId": 129970751, "SourceDatasetVersionId": 5134411}]
[{"Id": 5134411, "DatasetId": 2981720, "DatasourceVersionId": 5205967, "CreatorUserId": 11410132, "LicenseName": "Unknown", "CreationDate": "03/09/2023 12:15:26", "VersionNumber": 2.0, "Title": "TP2Dataset", "Slug": "tp2dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2023/03/09", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2981720, "CreatorUserId": 11410132, "OwnerUserId": 11410132.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5134411.0, "CurrentDatasourceVersionId": 5205967.0, "ForumId": 3020315, "Type": 2, "CreationDate": "03/09/2023 07:43:07", "LastActivityDate": "03/09/2023", "TotalViews": 148, "TotalDownloads": 2, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 11410132, "UserName": "ouahab7", "DisplayName": "ouahab7", "RegisterDate": "08/27/2022", "PerformanceTier": 1}]
false
1
627
1
647
627
129761822
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import tensorflow as tf from tensorflow.keras import backend as K def transpose(a, perm=None, name=None): """ Transposes a according to perm, dealing automatically with sparsity. :param a: Tensor or SparseTensor with rank k. :param perm: permutation indices of size k. :param name: name for the operation. :return: Tensor or SparseTensor with rank k. """ if K.is_sparse(a): transpose_op = tf.sparse.transpose else: transpose_op = tf.transpose if perm is None: perm = (1, 0) # Make explicit so that shape will always be preserved return transpose_op(a, perm=perm, name=name) def reshape(a, shape=None, name=None): """ Reshapes a according to shape, dealing automatically with sparsity. :param a: Tensor or SparseTensor. :param shape: new shape. :param name: name for the operation. :return: Tensor or SparseTensor. """ if K.is_sparse(a): reshape_op = tf.sparse.reshape else: reshape_op = tf.reshape return reshape_op(a, shape=shape, name=name) def repeat(x, repeats): """ Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D tensors). :param x: rank 1 Tensor; :param repeats: rank 1 Tensor with same shape as x, the number of repetitions for each element; :return: rank 1 Tensor, of shape `(sum(repeats), )`. """ x = tf.expand_dims(x, 1) max_repeats = tf.reduce_max(repeats) tile_repeats = [1, max_repeats] arr_tiled = tf.tile(x, tile_repeats) mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1)) result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1]) return result def segment_top_k(x, I, ratio): """ Returns indices to get the top K values in x segment-wise, according to the segments defined in I. K is not fixed, but it is defined as a ratio of the number of elements in each segment. :param x: a rank 1 Tensor; :param I: a rank 1 Tensor with segment IDs for x; :param ratio: float, ratio of elements to keep for each segment; :return: a rank 1 Tensor containing the indices to get the top K values of each segment in x. """ rt = tf.RaggedTensor.from_value_rowids(x, I) row_lengths = rt.row_lengths() dense = rt.to_tensor(default_value=-np.inf) indices = tf.cast(tf.argsort(dense, direction="DESCENDING"), tf.int64) row_starts = tf.cast(rt.row_starts(), tf.int64) indices = indices + tf.expand_dims(row_starts, 1) row_lengths = tf.cast( tf.math.ceil(ratio * tf.cast(row_lengths, tf.float32)), tf.int32 ) return tf.RaggedTensor.from_tensor(indices, row_lengths).values def indices_to_mask(indices, shape, dtype=tf.bool): """ Return mask with true values at indices of the given shape. This can be used as an inverse to tf.where. :param indices: [nnz, k] or [nnz] Tensor indices of True values. :param shape: [k] or [] (scalar) Tensor shape/size of output. :param dtype: dtype of the output. :return: Tensor of given shape and dtype. 
""" indices = tf.convert_to_tensor(indices, dtype_hint=tf.int64) if indices.shape.ndims == 1: assert isinstance(shape, int) or shape.shape.ndims == 0 indices = tf.expand_dims(indices, axis=1) if isinstance(shape, int): shape = tf.TensorShape([shape]) else: shape = tf.expand_dims(shape, axis=0) else: indices.shape.assert_has_rank(2) assert indices.dtype.is_integer nnz = tf.shape(indices)[0] indices = tf.cast(indices, tf.int64) shape = tf.cast(shape, tf.int64) return tf.scatter_nd(indices, tf.ones((nnz,), dtype=dtype), shape) import tensorflow as tf from tensorflow.keras import backend as K SINGLE = 1 # Single mode rank(x) = 2, rank(a) = 2 DISJOINT = SINGLE # Disjoint mode rank(x) = 2, rank(a) = 2 BATCH = 3 # Batch mode rank(x) = 3, rank(a) = 3 MIXED = 4 # Mixed mode rank(x) = 3, rank(a) = 2 def disjoint_signal_to_batch(X, I): """ Converts a disjoint graph signal to batch node by zero-padding. :param X: Tensor, node features of shape (nodes, features). :param I: Tensor, graph IDs of shape `(n_nodes, )`; :return batch: Tensor, batched node features of shape (batch, N_max, n_node_features) """ I = tf.cast(I, tf.int32) num_nodes = tf.math.segment_sum(tf.ones_like(I), I) start_index = tf.cumsum(num_nodes, exclusive=True) n_graphs = tf.shape(num_nodes)[0] max_n_nodes = tf.reduce_max(num_nodes) batch_n_nodes = tf.shape(I)[0] feature_dim = tf.shape(X)[-1] index = tf.range(batch_n_nodes) index = (index - tf.gather(start_index, I)) + (I * max_n_nodes) dense = tf.zeros((n_graphs * max_n_nodes, feature_dim), dtype=X.dtype) dense = tf.tensor_scatter_nd_update(dense, index[..., None], X) batch = tf.reshape(dense, (n_graphs, max_n_nodes, feature_dim)) return batch def disjoint_adjacency_to_batch(A, I): """ Converts a disjoint adjacency matrix to batch node by zero-padding. :param A: Tensor, binary adjacency matrix of shape `(n_nodes, n_nodes)`; :param I: Tensor, graph IDs of shape `(n_nodes, )`; :return: Tensor, batched adjacency matrix of shape `(batch, N_max, N_max)`; """ I = tf.cast(I, tf.int64) indices = A.indices values = A.values i_nodes, j_nodes = indices[:, 0], indices[:, 1] graph_sizes = tf.math.segment_sum(tf.ones_like(I), I) max_n_nodes = tf.reduce_max(graph_sizes) n_graphs = tf.shape(graph_sizes)[0] offset = tf.gather(I, i_nodes) offset = tf.gather(tf.cumsum(graph_sizes, exclusive=True), offset) relative_j_nodes = j_nodes - offset relative_i_nodes = i_nodes - offset spaced_i_nodes = tf.gather(I, i_nodes) * max_n_nodes + relative_i_nodes new_indices = tf.transpose(tf.stack([spaced_i_nodes, relative_j_nodes])) n_graphs = tf.cast(n_graphs, new_indices.dtype) max_n_nodes = tf.cast(max_n_nodes, new_indices.dtype) dense_adjacency = tf.scatter_nd( new_indices, values, (n_graphs * max_n_nodes, max_n_nodes) ) batch = tf.reshape(dense_adjacency, (n_graphs, max_n_nodes, max_n_nodes)) return batch def autodetect_mode(x, a): """ Returns a code that identifies the data mode from the given node features and adjacency matrix(s). The output of this function can be used as follows: ```py from spektral.layers.ops import modes mode = modes.autodetect_mode(x, a) if mode == modes.SINGLE: print('Single!') elif mode == modes.BATCH: print('Batch!') elif mode == modes.MIXED: print('Mixed!') ``` :param x: Tensor or SparseTensor representing the node features :param a: Tensor or SparseTensor representing the adjacency matrix(s) :return: mode of operation as an integer code. 
""" x_ndim = K.ndim(x) a_ndim = K.ndim(a) if x_ndim == 2 and a_ndim == 2: return SINGLE elif x_ndim == 3 and a_ndim == 3: return BATCH elif x_ndim == 3 and a_ndim == 2: return MIXED else: raise ValueError( "Unknown mode for inputs x, a with ranks {} and {}" "respectively.".format(x_ndim, a_ndim) ) from tensorflow.keras import activations, constraints, initializers, regularizers LAYER_KWARGS = {"activation", "use_bias"} KERAS_KWARGS = { "trainable", "name", "dtype", "dynamic", "input_dim", "input_shape", "batch_input_shape", "batch_size", "weights", "activity_regularizer", "autocast", "implementation", } def is_layer_kwarg(key): return key not in KERAS_KWARGS and ( key.endswith("_initializer") or key.endswith("_regularizer") or key.endswith("_constraint") or key in LAYER_KWARGS ) def is_keras_kwarg(key): return key in KERAS_KWARGS def deserialize_kwarg(key, attr): if key.endswith("_initializer"): return initializers.get(attr) if key.endswith("_regularizer"): return regularizers.get(attr) if key.endswith("_constraint"): return constraints.get(attr) if key == "activation": return activations.get(attr) return attr def serialize_kwarg(key, attr): if key.endswith("_initializer"): return initializers.serialize(attr) if key.endswith("_regularizer"): return regularizers.serialize(attr) if key.endswith("_constraint"): return constraints.serialize(attr) if key == "activation": return activations.serialize(attr) if key == "use_bias": return attr import warnings from functools import wraps import tensorflow as tf from tensorflow.keras.layers import Layer # from spektral.utils.keras import ( # deserialize_kwarg, # is_keras_kwarg, # is_layer_kwarg, # serialize_kwarg, # ) class Conv(Layer): r""" A general class for convolutional layers. You can extend this class to create custom implementations of GNN layers that use standard matrix multiplication instead of the gather-scatter approach of MessagePassing. This is useful if you want to create layers that support dense inputs, batch and mixed modes, or other non-standard processing. No checks are done on the inputs, to allow for maximum flexibility. Any extension of this class must implement the `call(self, inputs)` and `config(self)` methods. **Arguments**: - ``**kwargs`: additional keyword arguments specific to Keras' Layers, like regularizers, initializers, constraints, etc. """ def __init__(self, **kwargs): super().__init__(**{k: v for k, v in kwargs.items() if is_keras_kwarg(k)}) self.supports_masking = True self.kwargs_keys = [] for key in kwargs: if is_layer_kwarg(key): attr = kwargs[key] attr = deserialize_kwarg(key, attr) self.kwargs_keys.append(key) setattr(self, key, attr) self.call = check_dtypes_decorator(self.call) def build(self, input_shape): self.built = True def call(self, inputs): raise NotImplementedError def get_config(self): base_config = super().get_config() keras_config = {} for key in self.kwargs_keys: keras_config[key] = serialize_kwarg(key, getattr(self, key)) return {**base_config, **keras_config, **self.config} @property def config(self): return {} @staticmethod def preprocess(a): return a def check_dtypes_decorator(call): @wraps(call) def _inner_check_dtypes(inputs, **kwargs): inputs = check_dtypes(inputs) return call(inputs, **kwargs) return _inner_check_dtypes def check_dtypes(inputs): for value in inputs: if not hasattr(value, "dtype"): # It's not a valid tensor. 
return inputs if len(inputs) == 2: x, a = inputs e = None elif len(inputs) == 3: x, a, e = inputs else: return inputs if a.dtype in (tf.int32, tf.int64) and x.dtype in ( tf.float16, tf.float32, tf.float64, ): warnings.warn( f"The adjacency matrix of dtype {a.dtype} is incompatible with the dtype " f"of the node features {x.dtype} and has been automatically cast to " f"{x.dtype}." ) a = tf.cast(a, x.dtype) output = [_ for _ in [x, a, e] if _ is not None] return output import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras import constraints, initializers, regularizers from tensorflow.keras.layers import Dropout # from spektral.layers import ops # from spektral.layers.convolutional.conv import Conv # from spektral.layers.ops import modes class GATConv(Conv): r""" A Graph Attention layer (GAT) from the paper > [Graph Attention Networks](https://arxiv.org/abs/1710.10903)<br> > Petar Veličković et al. **Mode**: single, disjoint, mixed, batch. **This layer expects dense inputs when working in batch mode.** This layer computes a convolution similar to `layers.GraphConv`, but uses the attention mechanism to weight the adjacency matrix instead of using the normalized Laplacian: $$ \X' = \mathbf{\alpha}\X\W + \b $$ where $$ \mathbf{\alpha}_{ij} =\frac{ \exp\left(\mathrm{LeakyReLU}\left( \a^{\top} [(\X\W)_i \, \| \, (\X\W)_j]\right)\right)}{\sum\limits_{k \in \mathcal{N}(i) \cup \{ i \}} \exp\left(\mathrm{LeakyReLU}\left( \a^{\top} [(\X\W)_i \, \| \, (\X\W)_k]\right)\right)} $$ where \(\a \in \mathbb{R}^{2F'}\) is a trainable attention kernel. Dropout is also applied to \(\alpha\) before computing \(\Z\). Parallel attention heads are computed in parallel and their results are aggregated by concatenation or average. **Input** - Node features of shape `([batch], n_nodes, n_node_features)`; - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`; **Output** - Node features with the same shape as the input, but with the last dimension changed to `channels`; - if `return_attn_coef=True`, a list with the attention coefficients for each attention head. Each attention coefficient matrix has shape `([batch], n_nodes, n_nodes)`. **Arguments** - `channels`: number of output channels; - `attn_heads`: number of attention heads to use; - `concat_heads`: bool, whether to concatenate the output of the attention heads instead of averaging; - `dropout_rate`: internal dropout rate for attention coefficients; - `return_attn_coef`: if True, return the attention coefficients for the given input (one n_nodes x n_nodes matrix for each head). - `add_self_loops`: if True, add self loops to the adjacency matrix. - `activation`: activation function; - `use_bias`: bool, add a bias vector to the output; - `kernel_initializer`: initializer for the weights; - `attn_kernel_initializer`: initializer for the attention weights; - `bias_initializer`: initializer for the bias vector; - `kernel_regularizer`: regularization applied to the weights; - `attn_kernel_regularizer`: regularization applied to the attention kernels; - `bias_regularizer`: regularization applied to the bias vector; - `activity_regularizer`: regularization applied to the output; - `kernel_constraint`: constraint applied to the weights; - `attn_kernel_constraint`: constraint applied to the attention kernels; - `bias_constraint`: constraint applied to the bias vector. 
""" def __init__( self, channels, attn_heads=1, concat_heads=True, dropout_rate=0.5, return_attn_coef=False, add_self_loops=True, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", attn_kernel_initializer="glorot_uniform", kernel_regularizer=None, bias_regularizer=None, attn_kernel_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, attn_kernel_constraint=None, **kwargs, ): super().__init__( activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs, ) self.channels = channels self.attn_heads = attn_heads self.concat_heads = concat_heads self.dropout_rate = dropout_rate self.return_attn_coef = return_attn_coef self.add_self_loops = add_self_loops self.attn_kernel_initializer = initializers.get(attn_kernel_initializer) self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer) self.attn_kernel_constraint = constraints.get(attn_kernel_constraint) if concat_heads: self.output_dim = self.channels * self.attn_heads else: self.output_dim = self.channels def build(self, input_shape): assert len(input_shape) >= 2 input_dim = input_shape[0][-1] self.kernel = self.add_weight( name="kernel", shape=[input_dim, self.attn_heads, self.channels], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, ) self.attn_kernel_self = self.add_weight( name="attn_kernel_self", shape=[self.channels, self.attn_heads, 1], initializer=self.attn_kernel_initializer, regularizer=self.attn_kernel_regularizer, constraint=self.attn_kernel_constraint, ) self.attn_kernel_neighs = self.add_weight( name="attn_kernel_neigh", shape=[self.channels, self.attn_heads, 1], initializer=self.attn_kernel_initializer, regularizer=self.attn_kernel_regularizer, constraint=self.attn_kernel_constraint, ) if self.use_bias: self.bias = self.add_weight( shape=[self.output_dim], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, name="bias", ) self.dropout = Dropout(self.dropout_rate, dtype=self.dtype) self.built = True def call(self, inputs, mask=None): x, a = inputs mode = autodetect_mode(x, a) if mode == SINGLE and K.is_sparse(a): output, attn_coef = self._call_single(x, a) else: if K.is_sparse(a): a = tf.sparse.to_dense(a) output, attn_coef = self._call_dense(x, a) if self.concat_heads: shape = tf.concat( (tf.shape(output)[:-2], [self.attn_heads * self.channels]), axis=0 ) output = tf.reshape(output, shape) else: output = tf.reduce_mean(output, axis=-2) if self.use_bias: output += self.bias if mask is not None: output *= mask[0] output = self.activation(output) if self.return_attn_coef: return output, attn_coef else: return output def _call_single(self, x, a): # Reshape kernels for efficient message-passing kernel = tf.reshape(self.kernel, (-1, self.attn_heads * self.channels)) attn_kernel_self = ops.transpose(self.attn_kernel_self, (2, 1, 0)) attn_kernel_neighs = ops.transpose(self.attn_kernel_neighs, (2, 1, 0)) # Prepare message-passing indices = a.indices N = tf.shape(x, out_type=indices.dtype)[-2] if self.add_self_loops: indices = ops.add_self_loops_indices(indices, N) targets, sources = indices[:, 1], indices[:, 0] # Update node features x = K.dot(x, kernel) x = tf.reshape(x, (-1, 
class GATConv(Conv):
    r"""
    A Graph Attention layer (GAT) from the paper

    > [Graph Attention Networks](https://arxiv.org/abs/1710.10903)<br>
    > Petar Veličković et al.

    **Mode**: single, disjoint, mixed, batch.

    **This layer expects dense inputs when working in batch mode.**

    This layer computes a convolution similar to `layers.GraphConv`, but uses
    the attention mechanism to weight the adjacency matrix instead of using the
    normalized Laplacian:
    $$
        \X' = \mathbf{\alpha}\X\W + \b
    $$
    where
    $$
        \mathbf{\alpha}_{ij} =
        \frac{
            \exp\left(\mathrm{LeakyReLU}\left(
                \a^{\top} [(\X\W)_i \, \| \, (\X\W)_j]
            \right)\right)
        }{
            \sum\limits_{k \in \mathcal{N}(i) \cup \{ i \}}
            \exp\left(\mathrm{LeakyReLU}\left(
                \a^{\top} [(\X\W)_i \, \| \, (\X\W)_k]
            \right)\right)
        }
    $$
    where \(\a \in \mathbb{R}^{2F'}\) is a trainable attention kernel.
    Dropout is also applied to \(\alpha\) before computing \(\Z\).
    Attention heads are computed in parallel and their results are aggregated
    by concatenation or averaging.

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`;

    **Output**

    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`;
    - if `return_attn_coef=True`, a list with the attention coefficients for
    each attention head. Each attention coefficient matrix has shape
    `([batch], n_nodes, n_nodes)`.

    **Arguments**

    - `channels`: number of output channels;
    - `attn_heads`: number of attention heads to use;
    - `concat_heads`: bool, whether to concatenate the output of the attention
    heads instead of averaging;
    - `dropout_rate`: internal dropout rate for attention coefficients;
    - `return_attn_coef`: if True, return the attention coefficients for the
    given input (one n_nodes x n_nodes matrix for each head).
    - `add_self_loops`: if True, add self loops to the adjacency matrix.
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `attn_kernel_initializer`: initializer for the attention weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `attn_kernel_regularizer`: regularization applied to the attention kernels;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `attn_kernel_constraint`: constraint applied to the attention kernels;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        attn_heads=1,
        concat_heads=True,
        dropout_rate=0.5,
        return_attn_coef=False,
        add_self_loops=True,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        attn_kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        bias_regularizer=None,
        attn_kernel_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        attn_kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.attn_heads = attn_heads
        self.concat_heads = concat_heads
        self.dropout_rate = dropout_rate
        self.return_attn_coef = return_attn_coef
        self.add_self_loops = add_self_loops
        self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
        self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)

        if concat_heads:
            self.output_dim = self.channels * self.attn_heads
        else:
            self.output_dim = self.channels

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[0][-1]

        self.kernel = self.add_weight(
            name="kernel",
            shape=[input_dim, self.attn_heads, self.channels],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.channels, self.attn_heads, 1],
            initializer=self.attn_kernel_initializer,
            regularizer=self.attn_kernel_regularizer,
            constraint=self.attn_kernel_constraint,
        )
        self.attn_kernel_neighs = self.add_weight(
            name="attn_kernel_neigh",
            shape=[self.channels, self.attn_heads, 1],
            initializer=self.attn_kernel_initializer,
            regularizer=self.attn_kernel_regularizer,
            constraint=self.attn_kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=[self.output_dim],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                name="bias",
            )

        self.dropout = Dropout(self.dropout_rate, dtype=self.dtype)
        self.built = True

    def call(self, inputs, mask=None):
        x, a = inputs

        mode = autodetect_mode(x, a)
        if mode == SINGLE and K.is_sparse(a):
            output, attn_coef = self._call_single(x, a)
        else:
            if K.is_sparse(a):
                a = tf.sparse.to_dense(a)
            output, attn_coef = self._call_dense(x, a)

        if self.concat_heads:
            shape = tf.concat(
                (tf.shape(output)[:-2], [self.attn_heads * self.channels]), axis=0
            )
            output = tf.reshape(output, shape)
        else:
            output = tf.reduce_mean(output, axis=-2)

        if self.use_bias:
            output += self.bias
        if mask is not None:
            output *= mask[0]
        output = self.activation(output)

        if self.return_attn_coef:
            return output, attn_coef
        else:
            return output

    def _call_single(self, x, a):
        # Reshape kernels for efficient message-passing
        kernel = tf.reshape(self.kernel, (-1, self.attn_heads * self.channels))
        attn_kernel_self = ops.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_neighs = ops.transpose(self.attn_kernel_neighs, (2, 1, 0))

        # Prepare message-passing
        indices = a.indices
        N = tf.shape(x, out_type=indices.dtype)[-2]
        if self.add_self_loops:
            indices = ops.add_self_loops_indices(indices, N)
        targets, sources = indices[:, 1], indices[:, 0]

        # Update node features
        x = K.dot(x, kernel)
        x = tf.reshape(x, (-1, self.attn_heads, self.channels))

        # Compute attention
        attn_for_self = tf.reduce_sum(x * attn_kernel_self, -1)
        attn_for_self = tf.gather(attn_for_self, targets)
        attn_for_neighs = tf.reduce_sum(x * attn_kernel_neighs, -1)
        attn_for_neighs = tf.gather(attn_for_neighs, sources)

        attn_coef = attn_for_self + attn_for_neighs
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        attn_coef = ops.unsorted_segment_softmax(attn_coef, targets, N)
        attn_coef = self.dropout(attn_coef)
        attn_coef = attn_coef[..., None]

        # Update representation
        output = attn_coef * tf.gather(x, sources)
        output = tf.math.unsorted_segment_sum(output, targets, N)

        return output, attn_coef

    def _call_dense(self, x, a):
        shape = tf.shape(a)[:-1]
        if self.add_self_loops:
            a = tf.linalg.set_diag(a, tf.ones(shape, a.dtype))
        x = tf.einsum("...NI , IHO -> ...NHO", x, self.kernel)
        attn_for_self = tf.einsum("...NHI , IHO -> ...NHO", x, self.attn_kernel_self)
        attn_for_neighs = tf.einsum(
            "...NHI , IHO -> ...NHO", x, self.attn_kernel_neighs
        )
        attn_for_neighs = tf.einsum("...ABC -> ...CBA", attn_for_neighs)

        attn_coef = attn_for_self + attn_for_neighs
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)

        mask = tf.where(a == 0.0, -10e9, 0.0)
        mask = tf.cast(mask, dtype=attn_coef.dtype)
        attn_coef += mask[..., None, :]
        attn_coef = tf.nn.softmax(attn_coef, axis=-1)
        attn_coef_drop = self.dropout(attn_coef)

        output = tf.einsum("...NHM , ...MHI -> ...NHI", attn_coef_drop, x)

        return output, attn_coef

    @property
    def config(self):
        return {
            "channels": self.channels,
            "attn_heads": self.attn_heads,
            "concat_heads": self.concat_heads,
            "dropout_rate": self.dropout_rate,
            "return_attn_coef": self.return_attn_coef,
            "attn_kernel_initializer": initializers.serialize(
                self.attn_kernel_initializer
            ),
            "attn_kernel_regularizer": regularizers.serialize(
                self.attn_kernel_regularizer
            ),
            "attn_kernel_constraint": constraints.serialize(
                self.attn_kernel_constraint
            ),
        }


# Summary: code to export the adjoint (adjacency) matrix in one step
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings

warnings.filterwarnings("ignore")
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt

# Load the data
pro = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
train = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
test_proteins = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv"
)

# Process the protein data of the training set
pro1 = (
    pro.pivot(
        index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX"
    )
    .reset_index()
    .rename_axis(None, axis=1)
)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[
        :, i
    ].std(axis=0)
pro6 = (
    train.merge(pro5, how="left", on="visit_id")
    .dropna(subset=["Q92823"])
    .rename(columns={"patient_id_x": "patient_id", "visit_month_x": "visit_month"})
)
pro6
# Keep only the data from each patient's first visits; the heatmap stays essentially the same
# pro7=pro6.drop_duplicates(subset=['patient_id'], keep='first', inplace=False )
pro7 = pro6[pro6["visit_month"] <= 36]
# Exogenous information: add proteins already known to be relevant
# SUPPLYMENT_FEATURE=['P00450','P10451','P01033','P01008','P02647','P01024','Q92876']
# FEATURES_PPI=list(set(SUPPLYMENT_FEATURE) |
set(FEATURES_PPI)) # FEATURES_PPI.append('visit_month') FEATURES_PPI = [ "O00533", "O00584", "O14498", "O14773", "O15240", "O15394", "O43505", "O60888", "O75144", "O94919", "P00441", "P00450", "P00734", "P00736", "P00738", "P00746", "P00747", "P00748", "P00751", "P01008", "P01009", "P01011", "P01019", "P01023", "P01024", "P01033", "P01034", "P01042", "P01344", "P01621", "P01717", "P01834", "P01857", "P01859", "P01860", "P01876", "P01877", "P02452", "P02647", "P02649", "P02652", "P02656", "P02671", "P02675", "P02679", "P02747", "P02748", "P02749", "P02750", "P02751", "P02753", "P02760", "P02763", "P02765", "P02766", "P02768", "P02774", "P02787", "P02790", "P04004", "P04075", "P04156", "P04180", "P04196", "P04207", "P04211", "P04216", "P04217", "P04275", "P04433", "P05060", "P05067", "P05090", "P05155", "P05156", "P05452", "P05546", "P06396", "P06681", "P06727", "P07195", "P07225", "P07339", "P07602", "P07711", "P07858", "P07998", "P08294", "P08493", "P08571", "P08603", "P08697", "P09486", "P09871", "P10451", "P10643", "P10645", "P10909", "P12109", "P13473", "P13521", "P13591", "P13611", "P13987", "P14174", "P14618", "P16035", "P16070", "P16870", "P17174", "P18065", "P19652", "P19823", "P20774", "P23142", "P24592", "P25311", "P35542", "P36222", "P36955", "P39060", "P40925", "P41222", "P43121", "P43251", "P43652", "P49908", "P51884", "P54289", "P55290", "P61278", "P61626", "P61769", "P61916", "P80748", "P98160", "Q06481", "Q08380", "Q12805", "Q12841", "Q12907", "Q13283", "Q13332", "Q13451", "Q14118", "Q14508", "Q14515", "Q14624", "Q16270", "Q16610", "Q6UXB8", "Q7Z3B1", "Q7Z5P9", "Q8IWV7", "Q8N2S1", "Q8NBJ4", "Q92520", "Q92823", "Q92876", "Q96KN2", "Q96PD5", "Q9BY67", "Q9NQ79", "Q9UBX5", "Q9UHG2", "Q9Y646", "Q9Y6R7", ] print(len(FEATURES_PPI)) # 生成相关性矩阵 data = pro7[FEATURES_PPI] corr = data.corr() corr # 生成临界矩阵 adjoint = corr.applymap(lambda x: 1 if x > 0.6 else 0) for i in range(len(adjoint.index)): for j in range(len(adjoint.columns)): if i == j: adjoint.iloc[i, j] = 0 # adjoint=corr adjoint c = [ [ 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 
0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 
1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 
1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ] c = pd.DataFrame(c) adjoint = c adjoint df_train_cli = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv" ) df_train_pep = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv" ) df_train_pro = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv" ) df_test_cli = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test.csv" ) df_test_pep = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_peptides.csv" ) df_test_pro = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) df_test_sub = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/sample_submission.csv" ) # 训练集蛋白质数据 pro1 = ( pro.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX" ) .reset_index() .rename_axis(None, axis=1) ) pro3 = pro1.dropna(thresh=1000, axis=1) FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist() FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI)) for i in FEATURES_ALL: pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median()) pro4 = pro3.dropna() pro5 = pro4.copy() for i in FEATURES_ALL: pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) pro6 = ( train.merge(pro5, how="left", on="visit_id") .dropna(subset=["Q92823"]) .rename(columns={"patient_id_x": "patient_id", "visit_month_x": "visit_month"}) ) pro6.drop(FEATURES_DLE, axis=1, inplace=True) finaldata = pro6 finaldata = finaldata.dropna() finaldata = finaldata.reset_index() finaldata.visit_month = finaldata.visit_month.astype("float") finaldata target = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"] x = finaldata[FEATURES_PPI] y = finaldata[target] print(x.shape) print(y.shape) # 处理数据结构 GCN输入层要求为三维 adjoint1 = np.array(adjoint)[np.newaxis, :, :] adjoint2 = np.repeat(adjoint1, len(finaldata), 0) print(adjoint2.shape) # # adjoint.shape x1 = x.values[:, :, np.newaxis] print(x1.shape) # from spektral.layers import GCNConv import numpy as np import tensorflow as tf from tensorflow.keras.layers import ( Input, Dropout, Dense, Reshape, GlobalMaxPool1D, 
MaxPool1D, Flatten, ) from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras import activations, regularizers, constraints, initializers # from spektral.layers import GCNConv # from spektral.utils import normalized_adjacency import tensorflow.keras.backend as K tf.keras.utils.set_random_seed(1234) tf.random.set_seed(1234) # class GCNConv(tf.keras.layers.Layer): # def __init__(self, # units, # activation=lambda x: x, # use_bias=True, # kernel_initializer='glorot_uniform', # bias_initializer='zeros', # **kwargs): # super(GCNConv, self).__init__() # self.units = units # self.activation = activations.get(activation) # self.use_bias = use_bias # self.kernel_initializer = initializers.get(kernel_initializer) # self.bias_initializer = initializers.get(bias_initializer) # def build(self, input_shape): # """ GCN has two inputs : [shape(An), shape(X)] # """ # fdim = input_shape[0][-1] # feature dim # # 初始化权重矩阵 # self.weight = self.add_weight(name="weight", # shape=(fdim, self.units), # initializer=self.kernel_initializer, # trainable=True) # if self.use_bias: # # 初始化偏置项 # self.bias = self.add_weight(name="bias", # shape=(self.units, ), # initializer=self.bias_initializer, # trainable=True) # def call(self, inputs): # """ GCN has two inputs : [An, X] # """ # self.An = inputs[1] # self.X = inputs[0] # # 计算 XW # if isinstance(self.X, tf.SparseTensor): # h = tf.sparse.sparse_dense_matmul(self.X, self.weight) # else: # h = tf.matmul(self.X, self.weight) # # 计算 AXW # if isinstance(self.An, tf.SparseTensor): # output = tf.sparse.sparse_dense_matmul(self.An, h) # else: # output = tf.matmul(self.An, h) # if self.use_bias: # output = tf.nn.bias_add(output, self.bias) # if self.activation: # output = self.activation(output) # print(output.shape) # return output def smape_loss(y_true, y_pred): epsilon = 0.1 y_true = y_true + 1 y_pred = y_pred + 1 numer = K.abs(y_pred - y_true) denom = K.maximum(K.abs(y_true) + K.abs(y_pred) + epsilon, 0.5 + epsilon) smape = numer / (denom / 2) * 100 smape = tf.where(tf.math.is_nan(smape), tf.zeros_like(smape), smape) return smape def calculate_smape(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) numer = np.round(np.abs(y_pred - y_true), 0) denom = np.round(np.abs(y_true) + np.abs(y_pred), 0) return 1 / len(y_true) * np.sum(np.nan_to_num(numer / (denom / 2))) * 100 def build_model(): X_in = Input(shape=(len(adjoint), 1)) A_in = Input((len(adjoint), len(adjoint)), sparse=True) X_1 = GATConv(116, activation="relu")([X_in, A_in]) X_2 = GlobalMaxPool1D()(X_1) def build_model(n_features): X_in = Input(shape=(len(adjoint), n_features)) A_in = Input((len(adjoint), len(adjoint)), sparse=False) # 第一层GCN X_1 = GATConv(1, activation="relu")([X_in, A_in]) # X_1 = Dropout(0.5)(X_1) # 第二层GCN # X_2 = GCNConv(64, activation='relu')([X_1, A_in]) # X_2 = Dropout(0.5)(X_2) X_2 = Flatten()(X_1) # 全连接层 X_3 = Dense(256, activation="relu")(X_2) X_3 = Dropout(0.3)(X_3) X_4 = Dense(256, activation="relu")(X_3) X_4 = Dropout(0.3)(X_4) X_5 = Dense(150, activation="relu")(X_4) X_5 = Dropout(0.3)(X_5) X_6 = Dense(150, activation="relu")(X_5) X_6 = Dropout(0.3)(X_6) X_7 = Dense(128, activation="relu")(X_6) X_7 = Dropout(0.3)(X_7) X_8 = Dense(128, activation="relu")(X_7) X_8 = Dropout(0.3)(X_8) # 输出层 output = Dense(4, activation="linear")(X_5) # 模型定义 model = Model(inputs=[X_in, A_in], outputs=output) return model model = build_model(1) optimizer = Adam(learning_rate=0.005) 
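# --- Added sketch: sanity-check the loss above (not part of the original pipeline) ---
# This assumes smape_loss() exactly as defined in this notebook. Shifting y_true and
# y_pred by +1 ("SMAPE+1") keeps the denominator away from zero when both the target
# and the prediction are 0, which happens often for updrs_4.
import tensorflow as tf

_toy_true = tf.constant([[0.0, 10.0, 20.0, 0.0]])
_toy_pred = tf.constant([[0.0, 12.0, 18.0, 1.0]])
# A perfect match gives 0; otherwise roughly 200 * |p - t| / (|t + 1| + |p + 1|),
# e.g. about 16.6 for (10, 12) and about 64.5 for (0, 1).
print(smape_loss(_toy_true, _toy_pred).numpy())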
early_stopping = EarlyStopping(patience=10, restore_best_weights=True) model.compile(optimizer=optimizer, loss=smape_loss) model.summary() history = model.fit( [x1, adjoint2], y, epochs=50, validation_split=0.2, callbacks=[early_stopping] ) pd.DataFrame(history.history).plot() # Prepare the example test set test_proteins = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) test = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) test_proteins1 = ( test_proteins.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX" ) .reset_index() .rename_axis(None, axis=1) .drop(columns=["visit_month", "patient_id"]) ) test1 = test.merge(test_proteins1, how="left", on="visit_id") test2 = test1[["visit_id", "visit_month", "patient_id"] + FEATURES_ALL] # Step 1: fill missing values with the training-set medians test3 = test2 for i in FEATURES_ALL: test3.loc[:, i] = test3.loc[:, i].fillna(pro3.loc[:, i].median()) # Step 2: standardize with the training-set mean and std test4 = test3.copy() for i in FEATURES_ALL: test4.loc[:, i] = (test3.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) test5 = test4.drop_duplicates() test5 = test5.reset_index(drop=True) test5 def get_pred(test5): xtest1 = test5[FEATURES_PPI] xtest2 = xtest1.values.reshape(len(xtest1), len(FEATURES_PPI), 1) new_adjoint = adjoint.values.reshape(1, *adjoint.shape) new_adjoint1 = np.repeat(new_adjoint, len(xtest1), 0) result = model.predict([xtest2, new_adjoint1]) result = pd.DataFrame(result) return result get_pred(test5) def get_predictions(my_train): k = {} k[1] = 0.027 k[2] = 0.028 k[3] = 0.088 k[4] = 0 for u in target: my_train["result_" + str(u)] = 0 a = get_pred(my_train) if u == "updrs_1": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 0] * 0.3 + 5 * 0.7 elif u == "updrs_2": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 1] * 0.3 + 5 * 0.7 elif u == "updrs_3": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 2] * 0.3 + 21 * 0.7 elif u == "updrs_4": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = 0 # Format for final submission result = pd.DataFrame() for m in [0, 6, 12, 24]: for u in [1, 2, 3, 4]: temp = my_train[["visit_id", "result_updrs_" + str(u)]].copy() temp["prediction_id"] = ( temp["visit_id"] + "_updrs_" + str(u) + "_plus_" + str(m) + "_months" ) temp["rating"] = temp["result_updrs_" + str(u)] + k[u] * m temp = temp[["prediction_id", "rating"]] result = pd.concat([result, temp]) result = result.drop_duplicates(subset=["prediction_id", "rating"]) return result import amp_pd_peptide # import the competition's time-series API module env = amp_pd_peptide.make_env() # create the evaluation environment with make_env iter_test = env.iter_test() # iterator that yields the hidden test data batch by batch for test, test_peptides, test_proteins, sample_submission in iter_test: test_proteins1 = ( test_proteins.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX", ) .reset_index() .rename_axis(None, axis=1) .drop(columns=["visit_month", "patient_id"]) ) test1 = test.merge(test_proteins1, how="left", on="visit_id") for col in FEATURES_ALL: if col not in test1.columns: test1[col] = 0 test2 = test1[["visit_id", "visit_month", "patient_id"] + FEATURES_ALL] # Step 1: fill missing values with the training-set medians test3 = test2 for i in FEATURES_ALL: test3.loc[:, i] = test3.loc[:, i].fillna(pro3.loc[:, i].median()) # Step 2: standardize with the training-set mean and std test4 = test3.copy() for i in FEATURES_ALL:
test4.loc[:, i] = (test3.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) test5 = test4.drop_duplicates() test5 = test5.reset_index(drop=True) test6 = test5.copy() final_result = get_predictions(test5) print(final_result) env.predict(final_result) # register your predictions
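# --- Added sketch: optional offline check (not part of the submission loop) ---
# Assuming model, x1, adjoint2, finaldata and target from the training cells are still
# in memory, this reports the in-sample SMAPE per UPDRS target using the
# calculate_smape() helper defined earlier. It only gives a rough sense of fit; the
# hidden-test API above remains the real evaluation.
train_pred = model.predict([x1, adjoint2])
for j, name in enumerate(target):
    print(name, calculate_smape(finaldata[name].values, train_pred[:, j]))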
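# NOTE (added comment): the utilities and layers below -- transpose/reshape/repeat,
# segment_top_k, indices_to_mask, the mode constants, the Conv base class and GATConv --
# are copied, in part, from the spektral library (see the commented-out
# "from spektral..." imports). Inlining them presumably lets the notebook run without
# installing spektral in the offline scoring environment.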
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import tensorflow as tf from tensorflow.keras import backend as K def transpose(a, perm=None, name=None): """ Transposes a according to perm, dealing automatically with sparsity. :param a: Tensor or SparseTensor with rank k. :param perm: permutation indices of size k. :param name: name for the operation. :return: Tensor or SparseTensor with rank k. """ if K.is_sparse(a): transpose_op = tf.sparse.transpose else: transpose_op = tf.transpose if perm is None: perm = (1, 0) # Make explicit so that shape will always be preserved return transpose_op(a, perm=perm, name=name) def reshape(a, shape=None, name=None): """ Reshapes a according to shape, dealing automatically with sparsity. :param a: Tensor or SparseTensor. :param shape: new shape. :param name: name for the operation. :return: Tensor or SparseTensor. """ if K.is_sparse(a): reshape_op = tf.sparse.reshape else: reshape_op = tf.reshape return reshape_op(a, shape=shape, name=name) def repeat(x, repeats): """ Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D tensors). :param x: rank 1 Tensor; :param repeats: rank 1 Tensor with same shape as x, the number of repetitions for each element; :return: rank 1 Tensor, of shape `(sum(repeats), )`. """ x = tf.expand_dims(x, 1) max_repeats = tf.reduce_max(repeats) tile_repeats = [1, max_repeats] arr_tiled = tf.tile(x, tile_repeats) mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1)) result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1]) return result def segment_top_k(x, I, ratio): """ Returns indices to get the top K values in x segment-wise, according to the segments defined in I. K is not fixed, but it is defined as a ratio of the number of elements in each segment. :param x: a rank 1 Tensor; :param I: a rank 1 Tensor with segment IDs for x; :param ratio: float, ratio of elements to keep for each segment; :return: a rank 1 Tensor containing the indices to get the top K values of each segment in x. """ rt = tf.RaggedTensor.from_value_rowids(x, I) row_lengths = rt.row_lengths() dense = rt.to_tensor(default_value=-np.inf) indices = tf.cast(tf.argsort(dense, direction="DESCENDING"), tf.int64) row_starts = tf.cast(rt.row_starts(), tf.int64) indices = indices + tf.expand_dims(row_starts, 1) row_lengths = tf.cast( tf.math.ceil(ratio * tf.cast(row_lengths, tf.float32)), tf.int32 ) return tf.RaggedTensor.from_tensor(indices, row_lengths).values def indices_to_mask(indices, shape, dtype=tf.bool): """ Return mask with true values at indices of the given shape. This can be used as an inverse to tf.where. :param indices: [nnz, k] or [nnz] Tensor indices of True values. :param shape: [k] or [] (scalar) Tensor shape/size of output. :param dtype: dtype of the output. :return: Tensor of given shape and dtype. 
""" indices = tf.convert_to_tensor(indices, dtype_hint=tf.int64) if indices.shape.ndims == 1: assert isinstance(shape, int) or shape.shape.ndims == 0 indices = tf.expand_dims(indices, axis=1) if isinstance(shape, int): shape = tf.TensorShape([shape]) else: shape = tf.expand_dims(shape, axis=0) else: indices.shape.assert_has_rank(2) assert indices.dtype.is_integer nnz = tf.shape(indices)[0] indices = tf.cast(indices, tf.int64) shape = tf.cast(shape, tf.int64) return tf.scatter_nd(indices, tf.ones((nnz,), dtype=dtype), shape) import tensorflow as tf from tensorflow.keras import backend as K SINGLE = 1 # Single mode rank(x) = 2, rank(a) = 2 DISJOINT = SINGLE # Disjoint mode rank(x) = 2, rank(a) = 2 BATCH = 3 # Batch mode rank(x) = 3, rank(a) = 3 MIXED = 4 # Mixed mode rank(x) = 3, rank(a) = 2 def disjoint_signal_to_batch(X, I): """ Converts a disjoint graph signal to batch node by zero-padding. :param X: Tensor, node features of shape (nodes, features). :param I: Tensor, graph IDs of shape `(n_nodes, )`; :return batch: Tensor, batched node features of shape (batch, N_max, n_node_features) """ I = tf.cast(I, tf.int32) num_nodes = tf.math.segment_sum(tf.ones_like(I), I) start_index = tf.cumsum(num_nodes, exclusive=True) n_graphs = tf.shape(num_nodes)[0] max_n_nodes = tf.reduce_max(num_nodes) batch_n_nodes = tf.shape(I)[0] feature_dim = tf.shape(X)[-1] index = tf.range(batch_n_nodes) index = (index - tf.gather(start_index, I)) + (I * max_n_nodes) dense = tf.zeros((n_graphs * max_n_nodes, feature_dim), dtype=X.dtype) dense = tf.tensor_scatter_nd_update(dense, index[..., None], X) batch = tf.reshape(dense, (n_graphs, max_n_nodes, feature_dim)) return batch def disjoint_adjacency_to_batch(A, I): """ Converts a disjoint adjacency matrix to batch node by zero-padding. :param A: Tensor, binary adjacency matrix of shape `(n_nodes, n_nodes)`; :param I: Tensor, graph IDs of shape `(n_nodes, )`; :return: Tensor, batched adjacency matrix of shape `(batch, N_max, N_max)`; """ I = tf.cast(I, tf.int64) indices = A.indices values = A.values i_nodes, j_nodes = indices[:, 0], indices[:, 1] graph_sizes = tf.math.segment_sum(tf.ones_like(I), I) max_n_nodes = tf.reduce_max(graph_sizes) n_graphs = tf.shape(graph_sizes)[0] offset = tf.gather(I, i_nodes) offset = tf.gather(tf.cumsum(graph_sizes, exclusive=True), offset) relative_j_nodes = j_nodes - offset relative_i_nodes = i_nodes - offset spaced_i_nodes = tf.gather(I, i_nodes) * max_n_nodes + relative_i_nodes new_indices = tf.transpose(tf.stack([spaced_i_nodes, relative_j_nodes])) n_graphs = tf.cast(n_graphs, new_indices.dtype) max_n_nodes = tf.cast(max_n_nodes, new_indices.dtype) dense_adjacency = tf.scatter_nd( new_indices, values, (n_graphs * max_n_nodes, max_n_nodes) ) batch = tf.reshape(dense_adjacency, (n_graphs, max_n_nodes, max_n_nodes)) return batch def autodetect_mode(x, a): """ Returns a code that identifies the data mode from the given node features and adjacency matrix(s). The output of this function can be used as follows: ```py from spektral.layers.ops import modes mode = modes.autodetect_mode(x, a) if mode == modes.SINGLE: print('Single!') elif mode == modes.BATCH: print('Batch!') elif mode == modes.MIXED: print('Mixed!') ``` :param x: Tensor or SparseTensor representing the node features :param a: Tensor or SparseTensor representing the adjacency matrix(s) :return: mode of operation as an integer code. 
""" x_ndim = K.ndim(x) a_ndim = K.ndim(a) if x_ndim == 2 and a_ndim == 2: return SINGLE elif x_ndim == 3 and a_ndim == 3: return BATCH elif x_ndim == 3 and a_ndim == 2: return MIXED else: raise ValueError( "Unknown mode for inputs x, a with ranks {} and {}" "respectively.".format(x_ndim, a_ndim) ) from tensorflow.keras import activations, constraints, initializers, regularizers LAYER_KWARGS = {"activation", "use_bias"} KERAS_KWARGS = { "trainable", "name", "dtype", "dynamic", "input_dim", "input_shape", "batch_input_shape", "batch_size", "weights", "activity_regularizer", "autocast", "implementation", } def is_layer_kwarg(key): return key not in KERAS_KWARGS and ( key.endswith("_initializer") or key.endswith("_regularizer") or key.endswith("_constraint") or key in LAYER_KWARGS ) def is_keras_kwarg(key): return key in KERAS_KWARGS def deserialize_kwarg(key, attr): if key.endswith("_initializer"): return initializers.get(attr) if key.endswith("_regularizer"): return regularizers.get(attr) if key.endswith("_constraint"): return constraints.get(attr) if key == "activation": return activations.get(attr) return attr def serialize_kwarg(key, attr): if key.endswith("_initializer"): return initializers.serialize(attr) if key.endswith("_regularizer"): return regularizers.serialize(attr) if key.endswith("_constraint"): return constraints.serialize(attr) if key == "activation": return activations.serialize(attr) if key == "use_bias": return attr import warnings from functools import wraps import tensorflow as tf from tensorflow.keras.layers import Layer # from spektral.utils.keras import ( # deserialize_kwarg, # is_keras_kwarg, # is_layer_kwarg, # serialize_kwarg, # ) class Conv(Layer): r""" A general class for convolutional layers. You can extend this class to create custom implementations of GNN layers that use standard matrix multiplication instead of the gather-scatter approach of MessagePassing. This is useful if you want to create layers that support dense inputs, batch and mixed modes, or other non-standard processing. No checks are done on the inputs, to allow for maximum flexibility. Any extension of this class must implement the `call(self, inputs)` and `config(self)` methods. **Arguments**: - ``**kwargs`: additional keyword arguments specific to Keras' Layers, like regularizers, initializers, constraints, etc. """ def __init__(self, **kwargs): super().__init__(**{k: v for k, v in kwargs.items() if is_keras_kwarg(k)}) self.supports_masking = True self.kwargs_keys = [] for key in kwargs: if is_layer_kwarg(key): attr = kwargs[key] attr = deserialize_kwarg(key, attr) self.kwargs_keys.append(key) setattr(self, key, attr) self.call = check_dtypes_decorator(self.call) def build(self, input_shape): self.built = True def call(self, inputs): raise NotImplementedError def get_config(self): base_config = super().get_config() keras_config = {} for key in self.kwargs_keys: keras_config[key] = serialize_kwarg(key, getattr(self, key)) return {**base_config, **keras_config, **self.config} @property def config(self): return {} @staticmethod def preprocess(a): return a def check_dtypes_decorator(call): @wraps(call) def _inner_check_dtypes(inputs, **kwargs): inputs = check_dtypes(inputs) return call(inputs, **kwargs) return _inner_check_dtypes def check_dtypes(inputs): for value in inputs: if not hasattr(value, "dtype"): # It's not a valid tensor. 
return inputs if len(inputs) == 2: x, a = inputs e = None elif len(inputs) == 3: x, a, e = inputs else: return inputs if a.dtype in (tf.int32, tf.int64) and x.dtype in ( tf.float16, tf.float32, tf.float64, ): warnings.warn( f"The adjacency matrix of dtype {a.dtype} is incompatible with the dtype " f"of the node features {x.dtype} and has been automatically cast to " f"{x.dtype}." ) a = tf.cast(a, x.dtype) output = [_ for _ in [x, a, e] if _ is not None] return output import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras import constraints, initializers, regularizers from tensorflow.keras.layers import Dropout # from spektral.layers import ops # from spektral.layers.convolutional.conv import Conv # from spektral.layers.ops import modes class GATConv(Conv): r""" A Graph Attention layer (GAT) from the paper > [Graph Attention Networks](https://arxiv.org/abs/1710.10903)<br> > Petar Veličković et al. **Mode**: single, disjoint, mixed, batch. **This layer expects dense inputs when working in batch mode.** This layer computes a convolution similar to `layers.GraphConv`, but uses the attention mechanism to weight the adjacency matrix instead of using the normalized Laplacian: $$ \X' = \mathbf{\alpha}\X\W + \b $$ where $$ \mathbf{\alpha}_{ij} =\frac{ \exp\left(\mathrm{LeakyReLU}\left( \a^{\top} [(\X\W)_i \, \| \, (\X\W)_j]\right)\right)}{\sum\limits_{k \in \mathcal{N}(i) \cup \{ i \}} \exp\left(\mathrm{LeakyReLU}\left( \a^{\top} [(\X\W)_i \, \| \, (\X\W)_k]\right)\right)} $$ where \(\a \in \mathbb{R}^{2F'}\) is a trainable attention kernel. Dropout is also applied to \(\alpha\) before computing \(\Z\). Parallel attention heads are computed in parallel and their results are aggregated by concatenation or average. **Input** - Node features of shape `([batch], n_nodes, n_node_features)`; - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`; **Output** - Node features with the same shape as the input, but with the last dimension changed to `channels`; - if `return_attn_coef=True`, a list with the attention coefficients for each attention head. Each attention coefficient matrix has shape `([batch], n_nodes, n_nodes)`. **Arguments** - `channels`: number of output channels; - `attn_heads`: number of attention heads to use; - `concat_heads`: bool, whether to concatenate the output of the attention heads instead of averaging; - `dropout_rate`: internal dropout rate for attention coefficients; - `return_attn_coef`: if True, return the attention coefficients for the given input (one n_nodes x n_nodes matrix for each head). - `add_self_loops`: if True, add self loops to the adjacency matrix. - `activation`: activation function; - `use_bias`: bool, add a bias vector to the output; - `kernel_initializer`: initializer for the weights; - `attn_kernel_initializer`: initializer for the attention weights; - `bias_initializer`: initializer for the bias vector; - `kernel_regularizer`: regularization applied to the weights; - `attn_kernel_regularizer`: regularization applied to the attention kernels; - `bias_regularizer`: regularization applied to the bias vector; - `activity_regularizer`: regularization applied to the output; - `kernel_constraint`: constraint applied to the weights; - `attn_kernel_constraint`: constraint applied to the attention kernels; - `bias_constraint`: constraint applied to the bias vector. 
""" def __init__( self, channels, attn_heads=1, concat_heads=True, dropout_rate=0.5, return_attn_coef=False, add_self_loops=True, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", attn_kernel_initializer="glorot_uniform", kernel_regularizer=None, bias_regularizer=None, attn_kernel_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, attn_kernel_constraint=None, **kwargs, ): super().__init__( activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs, ) self.channels = channels self.attn_heads = attn_heads self.concat_heads = concat_heads self.dropout_rate = dropout_rate self.return_attn_coef = return_attn_coef self.add_self_loops = add_self_loops self.attn_kernel_initializer = initializers.get(attn_kernel_initializer) self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer) self.attn_kernel_constraint = constraints.get(attn_kernel_constraint) if concat_heads: self.output_dim = self.channels * self.attn_heads else: self.output_dim = self.channels def build(self, input_shape): assert len(input_shape) >= 2 input_dim = input_shape[0][-1] self.kernel = self.add_weight( name="kernel", shape=[input_dim, self.attn_heads, self.channels], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, ) self.attn_kernel_self = self.add_weight( name="attn_kernel_self", shape=[self.channels, self.attn_heads, 1], initializer=self.attn_kernel_initializer, regularizer=self.attn_kernel_regularizer, constraint=self.attn_kernel_constraint, ) self.attn_kernel_neighs = self.add_weight( name="attn_kernel_neigh", shape=[self.channels, self.attn_heads, 1], initializer=self.attn_kernel_initializer, regularizer=self.attn_kernel_regularizer, constraint=self.attn_kernel_constraint, ) if self.use_bias: self.bias = self.add_weight( shape=[self.output_dim], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, name="bias", ) self.dropout = Dropout(self.dropout_rate, dtype=self.dtype) self.built = True def call(self, inputs, mask=None): x, a = inputs mode = autodetect_mode(x, a) if mode == SINGLE and K.is_sparse(a): output, attn_coef = self._call_single(x, a) else: if K.is_sparse(a): a = tf.sparse.to_dense(a) output, attn_coef = self._call_dense(x, a) if self.concat_heads: shape = tf.concat( (tf.shape(output)[:-2], [self.attn_heads * self.channels]), axis=0 ) output = tf.reshape(output, shape) else: output = tf.reduce_mean(output, axis=-2) if self.use_bias: output += self.bias if mask is not None: output *= mask[0] output = self.activation(output) if self.return_attn_coef: return output, attn_coef else: return output def _call_single(self, x, a): # Reshape kernels for efficient message-passing kernel = tf.reshape(self.kernel, (-1, self.attn_heads * self.channels)) attn_kernel_self = ops.transpose(self.attn_kernel_self, (2, 1, 0)) attn_kernel_neighs = ops.transpose(self.attn_kernel_neighs, (2, 1, 0)) # Prepare message-passing indices = a.indices N = tf.shape(x, out_type=indices.dtype)[-2] if self.add_self_loops: indices = ops.add_self_loops_indices(indices, N) targets, sources = indices[:, 1], indices[:, 0] # Update node features x = K.dot(x, kernel) x = tf.reshape(x, (-1, 
# Summary: export the adjacency matrix (named `adjoint` below) in one step
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings

warnings.filterwarnings("ignore")
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt

# Load the data
pro = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
train = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
test_proteins = pd.read_csv(
    "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv"
)

# Process the training-set protein data: pivot to one column per UniProt ID,
# drop sparse columns, and z-score each protein
pro1 = (
    pro.pivot(
        index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX"
    )
    .reset_index()
    .rename_axis(None, axis=1)
)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[
        :, i
    ].std(axis=0)
pro6 = (
    train.merge(pro5, how="left", on="visit_id")
    .dropna(subset=["Q92823"])
    .rename(columns={"patient_id_x": "patient_id", "visit_month_x": "visit_month"})
)
pro6

# Keeping only each patient's first visit leaves the heatmap essentially unchanged
# pro7=pro6.drop_duplicates(subset=['patient_id'], keep='first', inplace=False )
pro7 = pro6[pro6["visit_month"] <= 36]

# External information: add in the proteins already known to be relevant
# SUPPLYMENT_FEATURE=['P00450','P10451','P01033','P01008','P02647','P01024','Q92876']
# FEATURES_PPI=list(set(SUPPLYMENT_FEATURE) |
set(FEATURES_PPI)) # FEATURES_PPI.append('visit_month') FEATURES_PPI = [ "O00533", "O00584", "O14498", "O14773", "O15240", "O15394", "O43505", "O60888", "O75144", "O94919", "P00441", "P00450", "P00734", "P00736", "P00738", "P00746", "P00747", "P00748", "P00751", "P01008", "P01009", "P01011", "P01019", "P01023", "P01024", "P01033", "P01034", "P01042", "P01344", "P01621", "P01717", "P01834", "P01857", "P01859", "P01860", "P01876", "P01877", "P02452", "P02647", "P02649", "P02652", "P02656", "P02671", "P02675", "P02679", "P02747", "P02748", "P02749", "P02750", "P02751", "P02753", "P02760", "P02763", "P02765", "P02766", "P02768", "P02774", "P02787", "P02790", "P04004", "P04075", "P04156", "P04180", "P04196", "P04207", "P04211", "P04216", "P04217", "P04275", "P04433", "P05060", "P05067", "P05090", "P05155", "P05156", "P05452", "P05546", "P06396", "P06681", "P06727", "P07195", "P07225", "P07339", "P07602", "P07711", "P07858", "P07998", "P08294", "P08493", "P08571", "P08603", "P08697", "P09486", "P09871", "P10451", "P10643", "P10645", "P10909", "P12109", "P13473", "P13521", "P13591", "P13611", "P13987", "P14174", "P14618", "P16035", "P16070", "P16870", "P17174", "P18065", "P19652", "P19823", "P20774", "P23142", "P24592", "P25311", "P35542", "P36222", "P36955", "P39060", "P40925", "P41222", "P43121", "P43251", "P43652", "P49908", "P51884", "P54289", "P55290", "P61278", "P61626", "P61769", "P61916", "P80748", "P98160", "Q06481", "Q08380", "Q12805", "Q12841", "Q12907", "Q13283", "Q13332", "Q13451", "Q14118", "Q14508", "Q14515", "Q14624", "Q16270", "Q16610", "Q6UXB8", "Q7Z3B1", "Q7Z5P9", "Q8IWV7", "Q8N2S1", "Q8NBJ4", "Q92520", "Q92823", "Q92876", "Q96KN2", "Q96PD5", "Q9BY67", "Q9NQ79", "Q9UBX5", "Q9UHG2", "Q9Y646", "Q9Y6R7", ] print(len(FEATURES_PPI)) # 生成相关性矩阵 data = pro7[FEATURES_PPI] corr = data.corr() corr # 生成临界矩阵 adjoint = corr.applymap(lambda x: 1 if x > 0.6 else 0) for i in range(len(adjoint.index)): for j in range(len(adjoint.columns)): if i == j: adjoint.iloc[i, j] = 0 # adjoint=corr adjoint c = [ [ 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 
0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 
1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 
1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, ], [ 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, ], [ 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, ], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ] c = pd.DataFrame(c) adjoint = c adjoint df_train_cli = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv" ) df_train_pep = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv" ) df_train_pro = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv" ) df_test_cli = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test.csv" ) df_test_pep = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_peptides.csv" ) df_test_pro = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) df_test_sub = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/sample_submission.csv" ) # 训练集蛋白质数据 pro1 = ( pro.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX" ) .reset_index() .rename_axis(None, axis=1) ) pro3 = pro1.dropna(thresh=1000, axis=1) FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist() FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI)) for i in FEATURES_ALL: pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median()) pro4 = pro3.dropna() pro5 = pro4.copy() for i in FEATURES_ALL: pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) pro6 = ( train.merge(pro5, how="left", on="visit_id") .dropna(subset=["Q92823"]) .rename(columns={"patient_id_x": "patient_id", "visit_month_x": "visit_month"}) ) pro6.drop(FEATURES_DLE, axis=1, inplace=True) finaldata = pro6 finaldata = finaldata.dropna() finaldata = finaldata.reset_index() finaldata.visit_month = finaldata.visit_month.astype("float") finaldata target = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"] x = finaldata[FEATURES_PPI] y = finaldata[target] print(x.shape) print(y.shape) # 处理数据结构 GCN输入层要求为三维 adjoint1 = np.array(adjoint)[np.newaxis, :, :] adjoint2 = np.repeat(adjoint1, len(finaldata), 0) print(adjoint2.shape) # # adjoint.shape x1 = x.values[:, :, np.newaxis] print(x1.shape) # from spektral.layers import GCNConv import numpy as np import tensorflow as tf from tensorflow.keras.layers import ( Input, Dropout, Dense, Reshape, GlobalMaxPool1D, 
MaxPool1D, Flatten, ) from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras import activations, regularizers, constraints, initializers # from spektral.layers import GCNConv # from spektral.utils import normalized_adjacency import tensorflow.keras.backend as K tf.keras.utils.set_random_seed(1234) tf.random.set_seed(1234) # class GCNConv(tf.keras.layers.Layer): # def __init__(self, # units, # activation=lambda x: x, # use_bias=True, # kernel_initializer='glorot_uniform', # bias_initializer='zeros', # **kwargs): # super(GCNConv, self).__init__() # self.units = units # self.activation = activations.get(activation) # self.use_bias = use_bias # self.kernel_initializer = initializers.get(kernel_initializer) # self.bias_initializer = initializers.get(bias_initializer) # def build(self, input_shape): # """ GCN has two inputs : [shape(An), shape(X)] # """ # fdim = input_shape[0][-1] # feature dim # # 初始化权重矩阵 # self.weight = self.add_weight(name="weight", # shape=(fdim, self.units), # initializer=self.kernel_initializer, # trainable=True) # if self.use_bias: # # 初始化偏置项 # self.bias = self.add_weight(name="bias", # shape=(self.units, ), # initializer=self.bias_initializer, # trainable=True) # def call(self, inputs): # """ GCN has two inputs : [An, X] # """ # self.An = inputs[1] # self.X = inputs[0] # # 计算 XW # if isinstance(self.X, tf.SparseTensor): # h = tf.sparse.sparse_dense_matmul(self.X, self.weight) # else: # h = tf.matmul(self.X, self.weight) # # 计算 AXW # if isinstance(self.An, tf.SparseTensor): # output = tf.sparse.sparse_dense_matmul(self.An, h) # else: # output = tf.matmul(self.An, h) # if self.use_bias: # output = tf.nn.bias_add(output, self.bias) # if self.activation: # output = self.activation(output) # print(output.shape) # return output def smape_loss(y_true, y_pred): epsilon = 0.1 y_true = y_true + 1 y_pred = y_pred + 1 numer = K.abs(y_pred - y_true) denom = K.maximum(K.abs(y_true) + K.abs(y_pred) + epsilon, 0.5 + epsilon) smape = numer / (denom / 2) * 100 smape = tf.where(tf.math.is_nan(smape), tf.zeros_like(smape), smape) return smape def calculate_smape(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) numer = np.round(np.abs(y_pred - y_true), 0) denom = np.round(np.abs(y_true) + np.abs(y_pred), 0) return 1 / len(y_true) * np.sum(np.nan_to_num(numer / (denom / 2))) * 100 def build_model(): X_in = Input(shape=(len(adjoint), 1)) A_in = Input((len(adjoint), len(adjoint)), sparse=True) X_1 = GATConv(116, activation="relu")([X_in, A_in]) X_2 = GlobalMaxPool1D()(X_1) def build_model(n_features): X_in = Input(shape=(len(adjoint), n_features)) A_in = Input((len(adjoint), len(adjoint)), sparse=False) # 第一层GCN X_1 = GATConv(1, activation="relu")([X_in, A_in]) # X_1 = Dropout(0.5)(X_1) # 第二层GCN # X_2 = GCNConv(64, activation='relu')([X_1, A_in]) # X_2 = Dropout(0.5)(X_2) X_2 = Flatten()(X_1) # 全连接层 X_3 = Dense(256, activation="relu")(X_2) X_3 = Dropout(0.3)(X_3) X_4 = Dense(256, activation="relu")(X_3) X_4 = Dropout(0.3)(X_4) X_5 = Dense(150, activation="relu")(X_4) X_5 = Dropout(0.3)(X_5) X_6 = Dense(150, activation="relu")(X_5) X_6 = Dropout(0.3)(X_6) X_7 = Dense(128, activation="relu")(X_6) X_7 = Dropout(0.3)(X_7) X_8 = Dense(128, activation="relu")(X_7) X_8 = Dropout(0.3)(X_8) # 输出层 output = Dense(4, activation="linear")(X_5) # 模型定义 model = Model(inputs=[X_in, A_in], outputs=output) return model model = build_model(1) optimizer = Adam(learning_rate=0.005) 
early_stopping = EarlyStopping(patience=10, restore_best_weights=True) model.compile(optimizer=optimizer, loss=smape_loss) model.summary() history = model.fit( [x1, adjoint2], y, epochs=50, validation_split=0.2, callbacks=[early_stopping] ) pd.DataFrame(history.history).plot() # 处理测试集 test_proteins = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) test = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv" ) test_proteins1 = ( test_proteins.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX" ) .reset_index() .rename_axis(None, axis=1) .drop(columns=["visit_month", "patient_id"]) ) test1 = test.merge(test_proteins1, how="left", on="visit_id") test2 = test1[["visit_id", "visit_month", "patient_id"] + FEATURES_ALL] # 第一步 先用中位数填补 test3 = test2 for i in FEATURES_ALL: test3.loc[:, i] = test3.loc[:, i].fillna(pro3.loc[:, i].median()) # 第二部,标准化 test4 = test3.copy() for i in FEATURES_ALL: test4.loc[:, i] = (test3.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) test5 = test4.drop_duplicates() test5 = test5.reset_index(drop=True) test5 def get_pred(test5): xtest1 = test5[FEATURES_PPI] xtest2 = xtest1.values.reshape(len(xtest1), len(FEATURES_PPI), 1) new_adjoint = adjoint.values.reshape(1, *adjoint.shape) new_adjoint1 = np.repeat(new_adjoint, len(xtest1), 0) result = model.predict([xtest2, new_adjoint1]) result = pd.DataFrame(result) return result get_pred(test5) def get_predictions(my_train): k = {} k[1] = 0.027 k[2] = 0.028 k[3] = 0.088 k[4] = 0 for u in target: my_train["result_" + str(u)] = 0 a = get_pred(my_train) if u == "updrs_1": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 0] * 0.3 + 5 * 0.7 elif u == "updrs_2": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 1] * 0.3 + 5 * 0.7 elif u == "updrs_3": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = a.iloc[:, 2] * 0.3 + 21 * 0.7 elif u == "updrs_4": mask = my_train[FEATURES_PPI].sum(axis=1) != 0 my_train.loc[mask, "result_" + str(u)] = 0 # Format for final submission result = pd.DataFrame() for m in [0, 6, 12, 24]: for u in [1, 2, 3, 4]: temp = my_train[["visit_id", "result_updrs_" + str(u)]].copy() temp["prediction_id"] = ( temp["visit_id"] + "_updrs_" + str(u) + "_plus_" + str(m) + "_months" ) temp["rating"] = temp["result_updrs_" + str(u)] + k[u] * m temp = temp[["prediction_id", "rating"]] result = result.append(temp) result = result.drop_duplicates(subset=["prediction_id", "rating"]) return result import amp_pd_peptide # 导入名为 amp_pd_peptide 的模块 env = amp_pd_peptide.make_env() # 使用该模块的 make_env 函数创建一个环境 iter_test = env.iter_test() # 使用环境的 iter_test 方法创建一个迭代器,用于逐步遍历测试数据集 for test, test_peptides, test_proteins, sample_submission in iter_test: test_proteins1 = ( test_proteins.pivot( index=["visit_id", "visit_month", "patient_id"], columns="UniProt", values="NPX", ) .reset_index() .rename_axis(None, axis=1) .drop(columns=["visit_month", "patient_id"]) ) test1 = test.merge(test_proteins1, how="left", on="visit_id") for col in FEATURES_ALL: if col not in test1.columns: test1[col] = 0 test2 = test1[["visit_id", "visit_month", "patient_id"] + FEATURES_ALL] # 第一步 先用中位数填补 test3 = test2 for i in FEATURES_ALL: test3.loc[:, i] = test3.loc[:, i].fillna(pro3.loc[:, i].median()) # 第二部,归一化 test4 = test3.copy() for i in FEATURES_ALL: 
test4.loc[:, i] = (test3.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[ :, i ].std(axis=0) test5 = test4.drop_duplicates() test5 = test5.reset_index(drop=True) test6 = test5.copy() final_result = get_predictions(test5) print(final_result) env.predict(final_result) # register your predictions#
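The competition metric behind the `smape_loss` and `calculate_smape` helpers above is a symmetric percentage error. As a quick, self-contained sanity check of that formula, here is a minimal NumPy sketch; the toy arrays are made up purely for illustration, and only the formula mirrors the notebook's helpers.

```python
import numpy as np

def smape(y_true, y_pred):
    """Symmetric mean absolute percentage error, in percent."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    numer = np.abs(y_pred - y_true)
    denom = (np.abs(y_true) + np.abs(y_pred)) / 2
    # Treat 0/0 terms as zero error, as the notebook's calculate_smape does via nan_to_num.
    ratio = np.divide(numer, denom, out=np.zeros_like(numer), where=denom > 0)
    return ratio.mean() * 100

# Toy example (illustrative values only).
print(smape([10, 20, 0, 5], [12, 18, 0, 5]))  # ≈ 7.18
```

The Keras `smape_loss` variant above additionally shifts both targets and predictions by +1 before taking the ratio, which keeps the loss finite when both the true value and the prediction are zero.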
false
0
97,097
0
97,097
97,097
129682021
from sklearn import datasets digits = datasets.load_digits() print(digits.DESCR) X = digits.data y = digits.target import pandas as pd df = pd.DataFrame(data=y, columns=["targets"]) df X.shape y.shape digits.images.shape digits.images[0] import matplotlib.pyplot as plt plt.imshow(digits.images[0], cmap=plt.cm.gray_r) plt.axis("off") plt.title("Number:" + str(y[0])) None figure, axes = plt.subplots(3, 10, figsize=(15, 6)) for ax, image, number in zip(axes.ravel(), digits.images, y): ax.axis("off") ax.imshow(image, cmap=plt.cm.gray_r) ax.set_title("Number:" + str(number)) image = digits.images[0] print("original image data=") print(image) print() image_flattened = image.ravel() print("flattened image = ") print(image_flattened) print() print("feature data for a sample= ") print(X[0]) print() print("Feature data for all samples is a 8-by-8 two dimensional array= ") print(X) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=99, stratify=y ) X_train.shape X_test.shape y_train.shape y_test.shape from sklearn.naive_bayes import BernoulliNB ber = BernoulliNB() ber.fit(X_train, y_train) y_pred = ber.predict(X_test) from sklearn.metrics import accuracy_score acc1 = accuracy_score(y_test, y_pred) * 100 print("Accuracy of the model: {0}%".format(acc1)) from sklearn.naive_bayes import GaussianNB gaus = GaussianNB() gaus.fit(X_train, y_train) y_pred = gaus.predict(X_test) from sklearn.metrics import accuracy_score acc2 = accuracy_score(y_test, y_pred) * 100 print("Accuracy of the model: {0}%".format(acc2)) from sklearn.naive_bayes import MultinomialNB mul = MultinomialNB() mul.fit(X_train, y_train) y_pred = mul.predict(X_test) from sklearn.metrics import accuracy_score acc3 = accuracy_score(y_test, y_pred) * 100 print("Accuracy of the model: {0}%".format(acc3)) models = pd.DataFrame( { "Model": ["Bernoulli NB", "Gaussian NB", "MultinomialNB"], "Score": [acc1, acc2, acc3], } ) models.sort_values(by="Score", ascending=False) # # FROM SCRATCH class NaiveBayes: def fit(self, X, y): n_samples, n_features = X.shape self._classes = np.unique(y) n_classes = len(self._classes) # calculate mean, var, and prior for each class self._mean = np.zeros((n_classes, n_features), dtype=np.float64) self._var = np.zeros((n_classes, n_features), dtype=np.float64) self._priors = np.zeros(n_classes, dtype=np.float64) for idx, c in enumerate(self._classes): X_c = X[y == c] self._mean[idx, :] = X_c.mean(axis=0) self._var[idx, :] = X_c.var(axis=0) self._priors[idx] = X_c.shape[0] / float(n_samples) def predict(self, X): y_pred = [self._predict(x) for x in X] return np.array(y_pred) def _predict(self, x): posteriors = [] # calculate posterior probability for each class for idx, c in enumerate(self._classes): prior = np.log(self._priors[idx]) posterior = np.sum(np.log(self._pdf(idx, x))) posterior = posterior + prior posteriors.append(posterior) # return class with the highest posterior return self._classes[np.argmax(posteriors)] def _pdf(self, class_idx, x): mean = self._mean[class_idx] var = self._var[class_idx] numerator = np.exp(-((x - mean) ** 2) / (2 * var)) denominator = np.sqrt(2 * np.pi * var) return numerator / denominator def accuracy(Y_test, Y_pred): accuracy = np.sum(Y_test == Y_pred) / len(Y_test) return accuracy import numpy as np nb = NaiveBayes() nb.fit(X_train, y_train) y_pred = nb.predict(X_test) acc_scratch = accuracy(y_test, y_pred) * 100 print("Accuracy of the model: {0}%".format(acc_scratch))
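The from-scratch `NaiveBayes` above scores each class by its log-prior plus a sum of log Gaussian densities, one term per pixel. One practical wrinkle with the 8-by-8 digits features: a pixel that is constant within a class has zero variance, so the density as written can divide by zero. A common guard, roughly analogous to scikit-learn's `var_smoothing` parameter on `GaussianNB`, is to put a small floor under the variance. The sketch below is only an illustration of that idea under an assumed epsilon; it is not part of the original notebook.

```python
import numpy as np

def gaussian_log_pdf(x, mean, var, eps=1e-9):
    # Log of the Gaussian density with a small variance floor, so features
    # that are constant within a class (zero variance) stay well-defined.
    # eps is an illustrative choice, not a tuned value.
    var = var + eps
    return -0.5 * (np.log(2 * np.pi * var) + (x - mean) ** 2 / var)

# Hand-checkable example: at the mean of a unit-variance Gaussian the
# log density is -0.5 * log(2 * pi) ≈ -0.9189.
print(gaussian_log_pdf(x=0.0, mean=0.0, var=1.0))
```

Summing these log densities across features, as `_predict` does, is also numerically safer than multiplying the raw densities, which is why working in log space is the usual choice.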
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/682/129682021.ipynb
null
null
[{"Id": 129682021, "ScriptId": 38563272, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13482734, "CreationDate": "05/15/2023 17:52:04", "VersionNumber": 1.0, "Title": "Digit Recognizer", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 142.0, "LinesInsertedFromPrevious": 142.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,318
0
1,318
1,318
129682576
<jupyter_start><jupyter_text>eCommerce behavior data from multi category store

### About
This file contains behavior data for 7 months (from October 2019 to April 2020) from a large multi-category online store. Each row in the file represents an event. All events are related to products and users. Each event is like a many-to-many relation between products and users.
Data collected by the [Open CDP](https://rees46.com/en/open-cdp) project. Feel free to use this open-source customer data platform.

### More datasets
Check out the other datasets:
1. https://www.kaggle.com/mkechinov/ecommerce-behavior-data-from-multi-category-store - you're reading it right now
2. https://www.kaggle.com/mkechinov/ecommerce-purchase-history-from-electronics-store
3. https://www.kaggle.com/mkechinov/ecommerce-events-history-in-cosmetics-shop
4. https://www.kaggle.com/mkechinov/ecommerce-purchase-history-from-jewelry-store
5. https://www.kaggle.com/mkechinov/ecommerce-events-history-in-electronics-store
6. [NEW] https://www.kaggle.com/datasets/mkechinov/direct-messaging

### How to read it
There are different types of events; see below. Semantics (or how to read it):
> User **user_id** during session **user_session** added to shopping cart (property **event_type** is equal to **cart**) product **product_id** of brand **brand** of category **category_code** (**category_code**) with price **price** at **event_time**.

### More datasets
Due to Kaggle's limit of max 20Gb of files per dataset, I can't upload more data to this dataset. Here you can find additional archives (Dec 2019 - Apr 2020):
* [2019-Oct.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Oct.csv.gz) (1.62Gb)
* [2019-Nov.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Nov.csv.gz) (2.69Gb)
* [2019-Dec.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Dec.csv.gz) (2.74Gb)
* [2020-Jan.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Jan.csv.gz) (2.23Gb)
* [2020-Feb.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Feb.csv.gz) (2.19Gb)
* [2020-Mar.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Mar.csv.gz) (2.25Gb)
* [2020-Apr.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Apr.csv.gz) (2.73Gb)

### File structure
| Property | Description |
| --- | --- |
| **event_time** | Time when the event happened (in UTC). |
| **event_type** | Kind of event (see the event types below). |
| **product_id** | ID of a product. |
| **category_id** | Product's category ID. |
| **category_code** | Product's category taxonomy (code name), if it was possible to make it. Usually present for meaningful categories and skipped for different kinds of accessories. |
| **brand** | Downcased string of the brand name. Can be missing. |
| **price** | Float price of a product. Present. |
| **user_id** | Permanent user ID. |
| **user_session** | Temporary user session ID. Same within each of the user's sessions. Changes every time the user comes back to the online store after a long pause. |

### Event types
Events can be:
- `view` - a user viewed a product
- `cart` - a user added a product to the shopping cart
- `remove_from_cart` - a user removed a product from the shopping cart
- `purchase` - a user purchased a product

### Multiple purchases per session
A session can have multiple **purchase** events. That's ok, because it's a single order.

### Many thanks
Thanks to the [REES46 Marketing Platform](https://rees46.com) for this dataset.

### Using datasets in your works, books, education materials
You can use this dataset for free.
Just mention the source of it: link to this page and link to [REES46 Marketing Platform](https://rees46.com). Kaggle dataset identifier: ecommerce-behavior-data-from-multi-category-store <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import missingno as msno import plotly.express as px from datetime import datetime import warnings warnings.filterwarnings("ignore") plt.style.use("fivethirtyeight") pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", 150) pd.options.display.float_format = "{:,}".format import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) dtype = { "user_id": "uint32", "product_id": "uint32", "event_type": "category", "category_code": "category", "brand": "category", "user_session": "category", } file = "/kaggle/input/ecommerce-behavior-data-from-multi-category-store/2019-Nov.csv" df = pd.read_csv(file, dtype=dtype) df df.info() df.describe() # # DATA CLEANING # converting event time to date and hour only df.insert( loc=1, column="date_utc", value=pd.to_datetime(df["event_time"].apply(lambda s: str(s)[0:10])), ) df.insert( loc=2, column="hour", value=(df["event_time"].apply(lambda s: str(s)[11:13])).astype("uint8"), ) df = df.drop("event_time", axis=1) df # visualizing null values msno.bar(df) plt.show() df = df.fillna("unknown") df.isna().sum() # we're gonna remove items with 'purchase' event type in our dataframe purchase = df[df["event_type"] == "purchase"] # removing purchase data from original data then dropping duplicates df = df[df["event_type"] != "purchase"].drop_duplicates() # concatenating both data df = pd.concat([df, purchase], ignore_index=True) df # # EDA # Knowing Your Customers # Q1: Customer Behavior Analysis # Visitors, Possible Customers & Buyers visitors = ( df.groupby("event_type")["user_id"] .agg(["count"]) .sort_values(by="count", ascending=False) .rename(columns={"count": "count_of_users"}) .reset_index() ) visitors["prcnt"] = ( 100 * visitors["count_of_users"] / visitors["count_of_users"].sum() ).round(1) print(visitors) px.pie(visitors, values="prcnt", names="event_type", template="plotly_dark") # # Customer activity is stable over the month & increased at Nov 14-17, 2019 (wednesday-sunday) # engagement of customers towards the store (view, cart, purchase) count activity = ( df.groupby(["date_utc", "hour"])["user_id"] .agg(["count"]) .reset_index() .sort_values(by=["date_utc", "hour"]) ) activity["time"] = pd.to_datetime(activity["hour"], format="%H").dt.strftime("%I %p") activity["week_day"] = activity["date_utc"].dt.day_name() activity["day"] = activity["date_utc"].dt.day_of_week activity month_activity = activity.groupby("date_utc")["count"].agg(["sum"]).reset_index() fig = px.bar( month_activity, x="date_utc", y="sum", title="Sum of Users Over the Month", template="plotly_dark", ) fig.update_xaxes(tickmode="linear") fig.show() week_activity = ( activity.groupby(["week_day", "day"])["count"] .agg(["mean"]) .round() .astype("uint32") .rename(columns={"mean": "average_user_by_day"}) .sort_values(by="day") .reset_index() ) fig1 = px.line( week_activity, x="week_day", y="average_user_by_day", text="average_user_by_day", title="Average User Count by Day", template="plotly_dark", ) fig1.update_xaxes(type="category") fig1.show() time_activity = ( activity.groupby(["hour", "time"])["count"] .agg(["mean"]) .round() .astype("uint32") .rename(columns={"mean": "average_users_by_hour"}) 
.reset_index() ) fig2 = px.line( time_activity, x="time", y="average_users_by_hour", title="Average User Count by Hour", template="plotly_dark", ) fig2.update_xaxes(tickmode="linear", type="category") fig2.show() # # Q2: Store's Revenue Analysis purchase revenue = purchase.groupby(["date_utc", "hour"])["price"].sum().reset_index().round(2) revenue["time"] = pd.to_datetime(revenue["hour"], format="%H").dt.strftime("%I %p") revenue["week_day"] = revenue["date_utc"].dt.day_name() revenue["day"] = revenue["date_utc"].dt.day_of_week revenue.head() rev_month = revenue.groupby(["date_utc"])["price"].sum().round(2).reset_index() fig = px.bar( rev_month, x="date_utc", y="price", title="Revenue Over The Month", template="plotly_dark", ) fig.update_xaxes(tickmode="linear") fig.show() rev_week = ( revenue.groupby(["day", "week_day"])["price"].agg(["mean"]).round(2).reset_index() ) fig1 = px.line( rev_week, x="week_day", y="mean", text="mean", title="Average Revenue Over by Day Of Week", template="plotly_dark", ) fig1.update_xaxes(tickmode="linear") fig1.show() rev_hour = revenue.groupby(["hour", "time"])["price"].mean().round(2).reset_index() fig2 = px.line( rev_hour, x="time", y="price", text="price", title="Average Revenue Over The Hour", template="plotly_dark", ) fig2.update_xaxes(tickmode="linear") fig2.show() rev_hour = ( purchase.groupby(["date_utc", "hour"])["user_id"].agg(["count"]).reset_index() ) rev_hour = ( rev_hour.groupby("hour")["count"] .agg(["mean"]) .round() .astype("uint16") .rename(columns={"mean": "average_purchase_activity"}) .reset_index() ) rev_hour["time"] = pd.to_datetime(rev_hour["hour"], format="%H").dt.strftime("%I %p") fig = px.line( rev_hour, x="time", y="average_purchase_activity", text="average_purchase_activity", title="Average Purchase Activity Over The Hour For The Whole Month", template="plotly_dark", ) fig.update_xaxes(tickmode="linear") fig.show() item = ( purchase.groupby("brand")["price"] .agg(["sum"]) .sort_values(by="sum", ascending=False) .round(2) .reset_index() .iloc[0:10] ) item["brand"] = item["brand"].str.capitalize() fig1 = px.bar( item, x="brand", y="sum", title="Top 10 Brands in terms of Revenue", template="plotly_dark", ) fig1.update_xaxes(tickmode="linear") fig1.show() p_item = purchase["brand"].value_counts().reset_index().iloc[0:10] p_item["brand"] = p_item["brand"].str.capitalize() fig2 = px.bar( p_item, x="brand", y="count", text="count", title="Top 10 Brands in Terms of Purchase Count", template="plotly_dark", ) fig2.update_xaxes(tickmode="linear") fig2.show() loyal = ( purchase.groupby(["user_id"])["price"] .agg(["sum"]) .round(2) .rename(columns={"sum": "amount_spent"}) .sort_values(by="amount_spent", ascending=False) .reset_index() .iloc[0:10] ) fig3 = px.bar( loyal, x="user_id", y="amount_spent", text="amount_spent", title="Top 10 Customers via Amount Spent", template="plotly_dark", ) fig3.update_xaxes(tickmode="linear", type="category") fig3.show()
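As a quick illustration of the semantics described above ("user **user_id**, during session **user_session**, did **event_type** on **product_id** at **event_time**"), here is a minimal, hedged sketch of how a single session's funnel could be reconstructed with pandas. The column names come from the file structure table; the file name assumes a locally downloaded month (e.g. 2019-Nov.csv), and the session id is a hypothetical placeholder, not a real value from the data.

import pandas as pd

# Load only the columns needed to read events in their documented semantics.
cols = ["event_time", "event_type", "product_id", "brand", "price", "user_id", "user_session"]
events = pd.read_csv("2019-Nov.csv", usecols=cols, parse_dates=["event_time"])

# One session, ordered in time: each row reads as
# "user ... during session ... did <event_type> on product ... at <event_time>".
session_id = "REPLACE-WITH-A-REAL-user_session-VALUE"  # hypothetical placeholder
funnel = (
    events.loc[events["user_session"] == session_id,
               ["event_time", "event_type", "product_id", "brand", "price"]]
    .sort_values("event_time")
)
print(funnel)

# Rough funnel summary: how many distinct sessions reach each event type.
print(events.groupby("event_type")["user_session"].nunique().sort_values(ascending=False))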
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/682/129682576.ipynb
ecommerce-behavior-data-from-multi-category-store
mkechinov
[{"Id": 129682576, "ScriptId": 38555842, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13754027, "CreationDate": "05/15/2023 17:57:43", "VersionNumber": 1.0, "Title": "Ecommerce Behavior", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186003796, "KernelVersionId": 129682576, "SourceDatasetVersionId": 835452}]
[{"Id": 835452, "DatasetId": 411512, "DatasourceVersionId": 858240, "CreatorUserId": 1884116, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "12/09/2019 20:43:39", "VersionNumber": 8.0, "Title": "eCommerce behavior data from multi category store", "Slug": "ecommerce-behavior-data-from-multi-category-store", "Subtitle": "This dataset contains 285 million users' events from eCommerce website", "Description": "### About\n\nThis file contaisn behavior data for 7 months (from October 2019 to April 2020) from a large multi-category online store. \n\nEach row in the file represents an event. All events are related to products and users. Each event is like many-to-many relation between products and users.\n\nData collected by [Open CDP](https://rees46.com/en/open-cdp) project. Feel free to use open source customer data platform.\n\n### More datasets\n\nCheckout another datasets:\n\n1. https://www.kaggle.com/mkechinov/ecommerce-behavior-data-from-multi-category-store - you're reading it right now\n2. https://www.kaggle.com/mkechinov/ecommerce-purchase-history-from-electronics-store\n3. https://www.kaggle.com/mkechinov/ecommerce-events-history-in-cosmetics-shop\n4. https://www.kaggle.com/mkechinov/ecommerce-purchase-history-from-jewelry-store\n5. https://www.kaggle.com/mkechinov/ecommerce-events-history-in-electronics-store\n6. [NEW] https://www.kaggle.com/datasets/mkechinov/direct-messaging\n\n### How to read it\n\nThere are different types of events. See below.\n\nSemantics (or how to read it): \n\n&gt; User **user_id** during session **user_session** added to shopping cart (property **event_type** is equal **cart**) product **product_id** of brand **brand** of category **category_code** (**category_code**) with price **price** at **event_time**\n\n### More datasets\n\nDue to Kaggle's limit to max 20Gb of files per dataset, I can' upload more data to this dataset. Here you can find additional archives (Dec 2019 - Apr 2020):\n\n* [2019-Oct.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Oct.csv.gz) (1.62Gb)\n* [2019-Nov.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Nov.csv.gz) (2.69Gb)\n* [2019-Dec.csv.gz](https://data.rees46.com/datasets/marketplace/2019-Dec.csv.gz) (2.74Gb)\n* [2020-Jan.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Jan.csv.gz) (2.23Gb)\n* [2020-Feb.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Feb.csv.gz) (2.19Gb)\n* [2020-Mar.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Mar.csv.gz) (2.25Gb)\n* [2020-Apr.csv.gz](https://data.rees46.com/datasets/marketplace/2020-Apr.csv.gz) (2.73Gb)\n\n### File structure\n\n| Property | Description |\n| --- | --- |\n|**event_time**|Time when event happened at (in UTC).|\n|**event_type**|Only one kind of event: purchase.|\n|**product_id**|ID of a product|\n|**category_id**|Product's category ID|\n|**category_code**|Product's category taxonomy (code name) if it was possible to make it. Usually present for meaningful categories and skipped for different kinds of accessories.|\n|**brand**|Downcased string of brand name. Can be missed.|\n|**price**|Float price of a product. Present.|\n|**user_id**|Permanent user ID.|\n|** user_session**|Temporary user's session ID. Same for each user's session. 
Is changed every time user come back to online store from a long pause.|\n\n### Event types\n\nEvents can be:\n\n- `view` - a user viewed a product\n- `cart` - a user added a product to shopping cart\n- `remove_from_cart` - a user removed a product from shopping cart\n- `purchase` - a user purchased a product\n\n### Multiple purchases per session\n\nA session can have multiple **purchase** events. It's ok, because it's a single order.\n\n### Many thanks\n\nThanks to [REES46 Marketing Platform](https://rees46.com) for this dataset.\n\n### Using datasets in your works, books, education materials\n\nYou can use this dataset for free. Just mention the source of it: link to this page and link to [REES46 Marketing Platform](https://rees46.com).", "VersionNotes": "Oct and Nov", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 411512, "CreatorUserId": 1884116, "OwnerUserId": 1884116.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 835452.0, "CurrentDatasourceVersionId": 858240.0, "ForumId": 423828, "Type": 2, "CreationDate": "11/10/2019 19:19:39", "LastActivityDate": "11/10/2019", "TotalViews": 270956, "TotalDownloads": 26233, "TotalVotes": 543, "TotalKernels": 37}]
[{"Id": 1884116, "UserName": "mkechinov", "DisplayName": "Michael Kechinov", "RegisterDate": "05/04/2018", "PerformanceTier": 2}]
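One implementation detail of the eCommerce notebook above worth flagging: date_utc and hour are derived by slicing event_time as a Python string inside .apply, which is slow on a month with tens of millions of rows. A hedged, vectorized alternative is sketched below; it assumes df is the frame returned by the notebook's read_csv call (i.e. event_time is still present as a string such as "2019-11-01 00:00:00 UTC"), and the resulting columns are intended to be interchangeable with the notebook's.

import pandas as pd

# Parse once, vectorized; pandas handles the trailing " UTC" suffix.
ts = pd.to_datetime(df["event_time"], utc=True)

# Same derived columns as the notebook, without per-row Python string slicing.
df.insert(loc=1, column="date_utc", value=ts.dt.tz_localize(None).dt.normalize())
df.insert(loc=2, column="hour", value=ts.dt.hour.astype("uint8"))
df = df.drop(columns="event_time")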
false
0
2,265
0
3,463
2,265
129682532
<jupyter_start><jupyter_text>Diamonds ### Context This classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization. ### Content **price** price in US dollars (\$326--\$18,823) **carat** weight of the diamond (0.2--5.01) **cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal) **color** diamond colour, from J (worst) to D (best) **clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best)) **x** length in mm (0--10.74) **y** width in mm (0--58.9) **z** depth in mm (0--31.8) **depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79) **table** width of top of diamond relative to widest point (43--95) Kaggle dataset identifier: diamonds <jupyter_code>import pandas as pd df = pd.read_csv('diamonds/diamonds.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 53940 entries, 0 to 53939 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 53940 non-null int64 1 carat 53940 non-null float64 2 cut 53940 non-null object 3 color 53940 non-null object 4 clarity 53940 non-null object 5 depth 53940 non-null float64 6 table 53940 non-null float64 7 price 53940 non-null int64 8 x 53940 non-null float64 9 y 53940 non-null float64 10 z 53940 non-null float64 dtypes: float64(6), int64(2), object(3) memory usage: 4.5+ MB <jupyter_text>Examples: { "Unnamed: 0": 1, "carat": 0.23, "cut": "Ideal", "color": "E", "clarity": "SI2", "depth": 61.5, "table": 55, "price": 326, "x": 3.95, "y": 3.98, "z": 2.43 } { "Unnamed: 0": 2, "carat": 0.21, "cut": "Premium", "color": "E", "clarity": "SI1", "depth": 59.8, "table": 61, "price": 326, "x": 3.89, "y": 3.84, "z": 2.31 } { "Unnamed: 0": 3, "carat": 0.23, "cut": "Good", "color": "E", "clarity": "VS1", "depth": 56.9, "table": 65, "price": 327, "x": 4.05, "y": 4.07, "z": 2.31 } { "Unnamed: 0": 4, "carat": 0.29, "cut": "Premium", "color": "I", "clarity": "VS2", "depth": 62.4, "table": 58, "price": 334, "x": 4.2, "y": 4.23, "z": 2.63 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Importing lib import numpy as np import pandas as pd import seaborn as sns import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.pylab as pylab from scipy import stats from scipy.stats import norm, skew from sklearn import preprocessing # preprocessing : from sklearn.impute import SimpleImputer from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder from sklearn.model_selection import train_test_split data = pd.read_csv("/kaggle/input/diamonds/diamonds.csv") # # EDA import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.metrics import mean_squared_error, r2_score import seaborn as sns import xgboost as xgb data.head() data = data.drop(data.columns[0], axis=1) data.head() data.info() # There is no null Values data.describe() # We have values of x, y and z equal to 0. We will remove them. # data = data[(data[["x", "y", "z"]] != 0).all(axis=1)] data.describe() # Show Categorical data and categorical inside it categorical_columns = [col for col in data.columns if data[col].dtype == "object"] categorical_columns # Getting the categories of each categorical column for col in categorical_columns: print(col, data[col].unique()) # Visualize the categ data # Checking the corr with price and other attributes categorical_data = data.select_dtypes(exclude=[np.number]).columns def pie(column): labels = data[column].unique() size = [] for i in labels: value = data[column].value_counts()[[i]] size.append(int(value.values)) return labels, size labels, sizes = pie("cut") labels2, sizes2 = pie("color") labels3, sizes3 = pie("clarity") # visualstion for categorical fig, axes = plt.subplots(3, 2, figsize=(20, 20)) colors = sns.color_palette("pastel")[0:8] fig.suptitle("Price Range vs all categorical factor") axes[0, 0].pie(x=sizes, labels=labels, colors=colors, autopct="%1.1f%%") sns.boxplot(ax=axes[0, 1], data=data, x="cut", y="price") axes[1, 0].pie(x=sizes2, labels=labels2, colors=colors, autopct="%1.1f%%") sns.boxplot(ax=axes[1, 1], data=data, x="color", y="price") axes[2, 0].pie(x=sizes3, labels=labels3, colors=colors, autopct="%1.1f%%") sns.boxplot(ax=axes[2, 1], data=data, x="clarity", y="price") plt.show() # Extracting the correlation between price and the other columns corr = data.corr()["price"].sort_values(ascending=False) corr corr_feats = corr.index[1:5] corr_feats for feat in corr_feats: plt.figure(figsize=(5, 5)) sns.displot(x=feat, data=data) plt.xlabel(feat) for feat in corr_feats: plt.figure(figsize=(5, 5)) sns.boxplot(x=feat, data=data) plt.xlabel(feat) corr = data.corr() sns.heatmap(data=corr, square=True, annot=True, cbar=True) data.head() # Before working and applying model we need change categ data by OHE # convert categorical value label_cut = preprocessing.LabelEncoder() label_color = preprocessing.LabelEncoder() label_clarity = preprocessing.LabelEncoder() 
data["cut"] = label_cut.fit_transform(data["cut"]) data["color"] = label_color.fit_transform(data["color"]) data["clarity"] = label_clarity.fit_transform(data["clarity"]) X = data.drop(["price"], axis=1) y = data["price"] X_train, X_vali, y_train, y_vali = train_test_split( X, y, test_size=0.2, random_state=42 ) lr = LinearRegression() lr.fit(X_train, y_train) # Make predictions on the validation data y_pred = lr.predict(X_vali) # Compute MEA mse = mean_squared_error(y_vali, y_pred) print("Mean squared error: {:.2f}".format(mse))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/682/129682532.ipynb
diamonds
shivam2503
[{"Id": 129682532, "ScriptId": 38564002, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13557325, "CreationDate": "05/15/2023 17:57:20", "VersionNumber": 1.0, "Title": "notebookbc69aa4268", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 162.0, "LinesInsertedFromPrevious": 162.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186003699, "KernelVersionId": 129682532, "SourceDatasetVersionId": 2368}]
[{"Id": 2368, "DatasetId": 1312, "DatasourceVersionId": 2368, "CreatorUserId": 945829, "LicenseName": "Unknown", "CreationDate": "05/25/2017 03:06:57", "VersionNumber": 1.0, "Title": "Diamonds", "Slug": "diamonds", "Subtitle": "Analyze diamonds by their cut, color, clarity, price, and other attributes", "Description": "### Context \n\nThis classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization.\n\n### Content\n\n**price** price in US dollars (\\$326--\\$18,823)\n\n**carat** weight of the diamond (0.2--5.01)\n\n**cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal)\n\n**color** diamond colour, from J (worst) to D (best)\n\n**clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))\n\n**x** length in mm (0--10.74)\n\n**y** width in mm (0--58.9)\n\n**z** depth in mm (0--31.8)\n\n**depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79)\n\n**table** width of top of diamond relative to widest point (43--95)", "VersionNotes": "Initial release", "TotalCompressedBytes": 3192560.0, "TotalUncompressedBytes": 3192560.0}]
[{"Id": 1312, "CreatorUserId": 945829, "OwnerUserId": 945829.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2368.0, "CurrentDatasourceVersionId": 2368.0, "ForumId": 3701, "Type": 2, "CreationDate": "05/25/2017 03:06:57", "LastActivityDate": "02/06/2018", "TotalViews": 434479, "TotalDownloads": 74575, "TotalVotes": 952, "TotalKernels": 444}]
[{"Id": 945829, "UserName": "shivam2503", "DisplayName": "Shivam Agrawal", "RegisterDate": "03/07/2017", "PerformanceTier": 1}]
[{"diamonds/diamonds.csv": {"column_names": "[\"Unnamed: 0\", \"carat\", \"cut\", \"color\", \"clarity\", \"depth\", \"table\", \"price\", \"x\", \"y\", \"z\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"carat\": \"float64\", \"cut\": \"object\", \"color\": \"object\", \"clarity\": \"object\", \"depth\": \"float64\", \"table\": \"float64\", \"price\": \"int64\", \"x\": \"float64\", \"y\": \"float64\", \"z\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 53940 entries, 0 to 53939\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 53940 non-null int64 \n 1 carat 53940 non-null float64\n 2 cut 53940 non-null object \n 3 color 53940 non-null object \n 4 clarity 53940 non-null object \n 5 depth 53940 non-null float64\n 6 table 53940 non-null float64\n 7 price 53940 non-null int64 \n 8 x 53940 non-null float64\n 9 y 53940 non-null float64\n 10 z 53940 non-null float64\ndtypes: float64(6), int64(2), object(3)\nmemory usage: 4.5+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 53940.0, \"mean\": 26970.5, \"std\": 15571.281096942537, \"min\": 1.0, \"25%\": 13485.75, \"50%\": 26970.5, \"75%\": 40455.25, \"max\": 53940.0}, \"carat\": {\"count\": 53940.0, \"mean\": 0.7979397478680014, \"std\": 0.4740112444054184, \"min\": 0.2, \"25%\": 0.4, \"50%\": 0.7, \"75%\": 1.04, \"max\": 5.01}, \"depth\": {\"count\": 53940.0, \"mean\": 61.749404894327036, \"std\": 1.432621318833661, \"min\": 43.0, \"25%\": 61.0, \"50%\": 61.8, \"75%\": 62.5, \"max\": 79.0}, \"table\": {\"count\": 53940.0, \"mean\": 57.45718390804598, \"std\": 2.2344905628213225, \"min\": 43.0, \"25%\": 56.0, \"50%\": 57.0, \"75%\": 59.0, \"max\": 95.0}, \"price\": {\"count\": 53940.0, \"mean\": 3932.799721913237, \"std\": 3989.439738146379, \"min\": 326.0, \"25%\": 950.0, \"50%\": 2401.0, \"75%\": 5324.25, \"max\": 18823.0}, \"x\": {\"count\": 53940.0, \"mean\": 5.731157211716722, \"std\": 1.1217607467924928, \"min\": 0.0, \"25%\": 4.71, \"50%\": 5.7, \"75%\": 6.54, \"max\": 10.74}, \"y\": {\"count\": 53940.0, \"mean\": 5.734525954764553, \"std\": 1.1421346741235552, \"min\": 0.0, \"25%\": 4.72, \"50%\": 5.71, \"75%\": 6.54, \"max\": 58.9}, \"z\": {\"count\": 53940.0, \"mean\": 3.5387337782721544, \"std\": 0.7056988469499941, \"min\": 0.0, \"25%\": 2.91, \"50%\": 3.53, \"75%\": 4.04, \"max\": 31.8}}", "examples": "{\"Unnamed: 0\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"carat\":{\"0\":0.23,\"1\":0.21,\"2\":0.23,\"3\":0.29},\"cut\":{\"0\":\"Ideal\",\"1\":\"Premium\",\"2\":\"Good\",\"3\":\"Premium\"},\"color\":{\"0\":\"E\",\"1\":\"E\",\"2\":\"E\",\"3\":\"I\"},\"clarity\":{\"0\":\"SI2\",\"1\":\"SI1\",\"2\":\"VS1\",\"3\":\"VS2\"},\"depth\":{\"0\":61.5,\"1\":59.8,\"2\":56.9,\"3\":62.4},\"table\":{\"0\":55.0,\"1\":61.0,\"2\":65.0,\"3\":58.0},\"price\":{\"0\":326,\"1\":326,\"2\":327,\"3\":334},\"x\":{\"0\":3.95,\"1\":3.89,\"2\":4.05,\"3\":4.2},\"y\":{\"0\":3.98,\"1\":3.84,\"2\":4.07,\"3\":4.23},\"z\":{\"0\":2.43,\"1\":2.31,\"2\":2.31,\"3\":2.63}}"}}]
true
1
<start_data_description><data_path>diamonds/diamonds.csv: <column_names> ['Unnamed: 0', 'carat', 'cut', 'color', 'clarity', 'depth', 'table', 'price', 'x', 'y', 'z'] <column_types> {'Unnamed: 0': 'int64', 'carat': 'float64', 'cut': 'object', 'color': 'object', 'clarity': 'object', 'depth': 'float64', 'table': 'float64', 'price': 'int64', 'x': 'float64', 'y': 'float64', 'z': 'float64'} <dataframe_Summary> {'Unnamed: 0': {'count': 53940.0, 'mean': 26970.5, 'std': 15571.281096942537, 'min': 1.0, '25%': 13485.75, '50%': 26970.5, '75%': 40455.25, 'max': 53940.0}, 'carat': {'count': 53940.0, 'mean': 0.7979397478680014, 'std': 0.4740112444054184, 'min': 0.2, '25%': 0.4, '50%': 0.7, '75%': 1.04, 'max': 5.01}, 'depth': {'count': 53940.0, 'mean': 61.749404894327036, 'std': 1.432621318833661, 'min': 43.0, '25%': 61.0, '50%': 61.8, '75%': 62.5, 'max': 79.0}, 'table': {'count': 53940.0, 'mean': 57.45718390804598, 'std': 2.2344905628213225, 'min': 43.0, '25%': 56.0, '50%': 57.0, '75%': 59.0, 'max': 95.0}, 'price': {'count': 53940.0, 'mean': 3932.799721913237, 'std': 3989.439738146379, 'min': 326.0, '25%': 950.0, '50%': 2401.0, '75%': 5324.25, 'max': 18823.0}, 'x': {'count': 53940.0, 'mean': 5.731157211716722, 'std': 1.1217607467924928, 'min': 0.0, '25%': 4.71, '50%': 5.7, '75%': 6.54, 'max': 10.74}, 'y': {'count': 53940.0, 'mean': 5.734525954764553, 'std': 1.1421346741235552, 'min': 0.0, '25%': 4.72, '50%': 5.71, '75%': 6.54, 'max': 58.9}, 'z': {'count': 53940.0, 'mean': 3.5387337782721544, 'std': 0.7056988469499941, 'min': 0.0, '25%': 2.91, '50%': 3.53, '75%': 4.04, 'max': 31.8}} <dataframe_info> RangeIndex: 53940 entries, 0 to 53939 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 53940 non-null int64 1 carat 53940 non-null float64 2 cut 53940 non-null object 3 color 53940 non-null object 4 clarity 53940 non-null object 5 depth 53940 non-null float64 6 table 53940 non-null float64 7 price 53940 non-null int64 8 x 53940 non-null float64 9 y 53940 non-null float64 10 z 53940 non-null float64 dtypes: float64(6), int64(2), object(3) memory usage: 4.5+ MB <some_examples> {'Unnamed: 0': {'0': 1, '1': 2, '2': 3, '3': 4}, 'carat': {'0': 0.23, '1': 0.21, '2': 0.23, '3': 0.29}, 'cut': {'0': 'Ideal', '1': 'Premium', '2': 'Good', '3': 'Premium'}, 'color': {'0': 'E', '1': 'E', '2': 'E', '3': 'I'}, 'clarity': {'0': 'SI2', '1': 'SI1', '2': 'VS1', '3': 'VS2'}, 'depth': {'0': 61.5, '1': 59.8, '2': 56.9, '3': 62.4}, 'table': {'0': 55.0, '1': 61.0, '2': 65.0, '3': 58.0}, 'price': {'0': 326, '1': 326, '2': 327, '3': 334}, 'x': {'0': 3.95, '1': 3.89, '2': 4.05, '3': 4.2}, 'y': {'0': 3.98, '1': 3.84, '2': 4.07, '3': 4.23}, 'z': {'0': 2.43, '1': 2.31, '2': 2.31, '3': 2.63}} <end_description>
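As a small sanity check of the relation given in the dataset description, depth is stated to be the total depth percentage 2 * z / (x + y); plugging in the first example row above (x=3.95, y=3.98, z=2.43, depth=61.5) reproduces it up to the two-decimal rounding of x, y and z.

# First example row from the data description above.
x, y, z = 3.95, 3.98, 2.43
depth_reported = 61.5

depth_computed = 100 * 2 * z / (x + y)
print(round(depth_computed, 2))  # ~61.29, consistent with the reported 61.5 given the rounded x, y, z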
1,338
0
2,382
1,338
129716518
<jupyter_start><jupyter_text>CIFAKE: Real and AI-Generated Synthetic Images # CIFAKE: Real and AI-Generated Synthetic Images The quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness. CIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI? Further information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126) ![Images from the CIFAKE dataset](https://i.imgur.com/RiOwf8i.png) ## Dataset details The dataset contains two classes - REAL and FAKE. For REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) For the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4 There are 100,000 images for training (50k per class) and 20,000 for testing (10k per class) ## Papers with Code The dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images) [https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images) ## References If you use this dataset, you **must** cite the following sources [Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdfl) [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126) Real images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published. ## Notes The updates to the dataset on the 28th of March 2023 did not change anything; the file formats ".jpeg" were renamed ".jpg" and the root folder was uploaded to meet Kaggle's usability requirements. ## License This dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE): *Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:* *The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.* *THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.* Kaggle dataset identifier: cifake-real-and-ai-generated-synthetic-images <jupyter_script># ## Basic Libraries import pandas as pd import numpy as np # ## Libraries for Image import cv2 import PIL PIL.Image.open( "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train/FAKE/5999.jpg" ) image = cv2.imread( "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train/FAKE/5999.jpg" ) image.shape # ## **TRAIN DATAFRAME** # ## Creating Training Dataframe for **"Fake Image"** train_path_fake = [] base_1 = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train/FAKE/" for i in range(1000, 6000): a = "{}{}.jpg".format(base_1, i) train_path_fake.append(a) for i in range(1000, 6000): for j in range(2, 11): b = "{}{} ({}).jpg".format(base_1, i, j) train_path_fake.append(b) len(train_path_fake) df_train_fake = pd.DataFrame(train_path_fake) df_train_fake.columns = ["path"] df_train_fake["label"] = 0 df_train_fake.head() # ## Creating Training Dataframe for **"Real Image"** train_path_real = [] base_2 = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train/REAL/" for i in range(0, 10): c = "{}000{}.jpg".format(base_2, i) train_path_real.append(c) for i in range(0, 10): for j in range(2, 11): d = "{}000{} ({}).jpg".format(base_2, i, j) train_path_real.append(d) len(train_path_real) for i in range(10, 100): e = "{}00{}.jpg".format(base_2, i) train_path_real.append(e) for i in range(10, 100): for j in range(2, 11): f = "{}00{} ({}).jpg".format(base_2, i, j) train_path_real.append(f) len(train_path_real) for i in range(100, 1000): g = "{}0{}.jpg".format(base_2, i) train_path_real.append(g) for i in range(100, 1000): for j in range(2, 11): h = "{}0{} ({}).jpg".format(base_2, i, j) train_path_real.append(h) len(train_path_real) for i in range(1000, 5000): k = "{}{}.jpg".format(base_2, i) train_path_real.append(k) for i in range(1000, 5000): for j in range(2, 11): l = "{}{} ({}).jpg".format(base_2, i, j) train_path_real.append(l) len(train_path_real) df_train_real = pd.DataFrame(train_path_real) df_train_real.columns = ["path"] df_train_real["label"] = 1 df_train_real.head() # ## Using **"concat"** to create **"df_train"** df_train = pd.concat((df_train_fake, df_train_real), axis=0) print(df_train.shape) df_train.sample(5) # ## Using **'cv2'** for X_train image_df = [] for image in df_train["path"]: img = cv2.imread(image) resized = cv2.resize(img, (32, 32)) image_df.append(resized) image_array = np.array(image_df) X_train = image_array / 255 X_train.ndim y_train = df_train["label"] y_train.head() # ## **TEST DATAFRAME** # ## Creating Testing Dataframe for **"Fake Image"** test_path_fake = [] base_3 = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/test/FAKE/" for i in range(0, 1000): m = "{}{}.jpg".format(base_3, i) test_path_fake.append(m) for i in range(0, 1000): for j in range(2, 11): n = "{}{} ({}).jpg".format(base_3, i, j) test_path_fake.append(n) len(test_path_fake) df_test_fake = pd.DataFrame(test_path_fake) df_test_fake.columns = ["path"] df_test_fake["label"] = 0 df_test_fake.head() # ## Creating Testing Dataframe for **"Real Image"** test_path_real = [] base_4 = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/test/REAL/" for i in range(0, 10): o = "{}000{}.jpg".format(base_4, i) 
test_path_real.append(o) for i in range(0, 10): for j in range(2, 11): p = "{}000{} ({}).jpg".format(base_4, i, j) test_path_real.append(p) len(test_path_real) for i in range(10, 100): q = "{}00{}.jpg".format(base_4, i) test_path_real.append(q) for i in range(10, 100): for j in range(2, 11): r = "{}00{} ({}).jpg".format(base_4, i, j) test_path_real.append(r) len(test_path_real) for i in range(100, 1000): s = "{}0{}.jpg".format(base_4, i) test_path_real.append(s) for i in range(100, 1000): for j in range(2, 11): t = "{}0{} ({}).jpg".format(base_4, i, j) test_path_real.append(t) len(test_path_real) df_test_real = pd.DataFrame(test_path_real) df_test_real.columns = ["path"] df_test_real["label"] = 1 df_test_real.head() # ## Using **"concat"** to create **"df_test"** df_test = pd.concat((df_test_fake, df_test_real), axis=0) print(df_test.shape) df_test.sample(5) # ## Using **"cv2"** to create X_test image_ds = [] for image in df_test["path"]: imge = cv2.imread(image) resize = cv2.resize(imge, (32, 32)) image_ds.append(resize) image_arry = np.array(image_ds) X_test = image_arry / 255 X_test.ndim y_test = df_test["label"] y_test.head() # ## Importing **"keras"** for CNN from tensorflow.keras import models, layers model = models.Sequential( [ layers.Conv2D( filters=80, kernel_size=(3, 3), activation="relu", input_shape=(32, 32, 3) ), layers.MaxPool2D((2, 2)), layers.Conv2D(filters=40, kernel_size=(3, 3), activation="relu"), layers.MaxPool2D((2, 2)), layers.Conv2D(filters=20, kernel_size=(3, 3), activation="relu"), layers.MaxPool2D((2, 2)), layers.Flatten(), layers.Dense(10, activation="relu"), layers.Dense(2, activation="sigmoid"), ] ) model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] ) model.fit(X_train, y_train, epochs=5) model.evaluate(X_test, y_test) y_pred = model.predict(X_test) y_pred_label = [np.argmax(i) for i in y_pred] y_pred_label[:5] # ## Classification Report from sklearn.metrics import classification_report print("Classification Report : \n\n\n", classification_report(y_test, y_pred_label)) # ## Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred_label) import matplotlib.pyplot as plt import seaborn as sn plt.figure(figsize=(4, 4), facecolor="pink", edgecolor="brown") sn.heatmap(cm, annot=True, fmt="d") plt.xlabel("Prediction", fontdict={"family": "fantasy", "color": "black", "size": 15}) plt.ylabel("Truth", fontdict={"family": "fantasy", "color": "black", "size": 15}) plt.title( "Confusion Matrix", fontdict={"family": "fantasy", "color": "red", "size": 20} ) plt.show()
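The notebook above builds the train and test path lists by hand with nested string-formatting loops over the REAL/ and FAKE/ folders. If the Kaggle image in use is recent enough to ship tf.keras.utils.image_dataset_from_directory, a hedged alternative is to let Keras infer labels from the folder names (alphabetically FAKE=0, REAL=1, which matches the 0/1 labels used above). This is a sketch of that approach, not a claim that it reproduces the notebook's exact result.

import tensorflow as tf

base = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images"

# Labels are inferred from the sub-folder names: FAKE -> 0, REAL -> 1 (alphabetical order).
train_ds = tf.keras.utils.image_dataset_from_directory(
    f"{base}/train", image_size=(32, 32), batch_size=64, shuffle=True, seed=42
)
test_ds = tf.keras.utils.image_dataset_from_directory(
    f"{base}/test", image_size=(32, 32), batch_size=64, shuffle=False
)

# Scale pixels to [0, 1], mirroring the notebook's image_array / 255 step.
rescale = tf.keras.layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (rescale(x), y))
test_ds = test_ds.map(lambda x, y: (rescale(x), y))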
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/716/129716518.ipynb
cifake-real-and-ai-generated-synthetic-images
birdy654
[{"Id": 129716518, "ScriptId": 38477875, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13051013, "CreationDate": "05/16/2023 01:52:04", "VersionNumber": 1.0, "Title": "92% - Real and AI-Generated Synthetic Images - CNN", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 260.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186054090, "KernelVersionId": 129716518, "SourceDatasetVersionId": 5256696}]
[{"Id": 5256696, "DatasetId": 3041726, "DatasourceVersionId": 5329502, "CreatorUserId": 2039603, "LicenseName": "Other (specified in description)", "CreationDate": "03/28/2023 16:00:29", "VersionNumber": 3.0, "Title": "CIFAKE: Real and AI-Generated Synthetic Images", "Slug": "cifake-real-and-ai-generated-synthetic-images", "Subtitle": "Can Computer Vision detect when images have been generated by AI?", "Description": "# CIFAKE: Real and AI-Generated Synthetic Images\nThe quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness.\n\nCIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI?\n\nFurther information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\n![Images from the CIFAKE dataset](https://i.imgur.com/RiOwf8i.png)\n\n## Dataset details\nThe dataset contains two classes - REAL and FAKE. \n\nFor REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)\n\nFor the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4\n\nThere are 100,000 images for training (50k per class) and 20,000 for testing (10k per class)\n\n## Papers with Code\nThe dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n[https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n\n\n## References\nIf you use this dataset, you **must** cite the following sources\n\n[Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdfl)\n\n[Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\nReal images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). 
The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published.\n\n## Notes\n\nThe updates to the dataset on the 28th of March 2023 did not change anything; the file formats \".jpeg\" were renamed \".jpg\" and the root folder was uploaded to meet Kaggle's usability requirements.\n\n## License\nThis dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE):\n\n*Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:*\n\n*The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*\n\n*THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*", "VersionNotes": "Kaggle compatibility fix (no actual changes)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3041726, "CreatorUserId": 2039603, "OwnerUserId": 2039603.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5256696.0, "CurrentDatasourceVersionId": 5329502.0, "ForumId": 3081274, "Type": 2, "CreationDate": "03/24/2023 13:22:42", "LastActivityDate": "03/24/2023", "TotalViews": 13728, "TotalDownloads": 1803, "TotalVotes": 46, "TotalKernels": 15}]
[{"Id": 2039603, "UserName": "birdy654", "DisplayName": "Jordan J. Bird", "RegisterDate": "07/03/2018", "PerformanceTier": 2}]
false
0
2,433
1
3,475
2,433
129351362
# ========================= # Import libraries # ========================= import os, glob import random import cv2 import pandas as pd import polars as pl from tqdm import tqdm import numpy as np import plotly.express as px import matplotlib.pyplot as plt def show_df(df, num=3, tail=True): print(df.shape) display(df.head(num)) if tail: display(df.tail(num)) defog_path = glob.glob( "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/*.csv" ) tdcsfog_path = glob.glob( "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog/*.csv" ) notype_path = glob.glob( "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/notype/*.csv" ) # # 1. defog -domestic test- print("-" * 80) print("Data num of each_path") print("-" * 80) print(f"defog_path: {len(defog_path)}") print(f"tdcsfog_path: {len(tdcsfog_path)}") print(f"notype_path: {len(notype_path)}") print("-" * 80) df_list = [] for idx, path in tqdm(enumerate(defog_path)): df = pl.read_csv(path) filename = os.path.basename(path).split(".cs")[0] tmp = pl.DataFrame( { "idx": [idx] * len(df), "filename": [filename] * len(df), } ) df = pl.concat([df, tmp], how="horizontal") df_list.append(df) df_defog = pl.concat(df_list) show_df(df_defog) df_defog_sum = ( df_defog.select("filename", "StartHesitation", "Turn", "Walking") .groupby("filename") .sum() ) show_df(df_defog_sum) df_defog_sum = df_defog_sum.to_pandas() print("-" * 80) print("StartHesitation") print("-" * 80) df_defog_sum.sort_values("StartHesitation", ascending=False, inplace=True) starthesitation_paths = df_defog_sum["filename"][0:3] print(*starthesitation_paths) show_df(df_defog_sum, 5, False) print("-" * 80) print("Turn") print("-" * 80) df_defog_sum.sort_values("Turn", ascending=False, inplace=True) turn_paths = df_defog_sum["filename"][0:3] print(*turn_paths) show_df(df_defog_sum, 5, False) print("-" * 80) print("Walking") print("-" * 80) df_defog_sum.sort_values("Walking", ascending=False, inplace=True) walking_paths = df_defog_sum["filename"][0:3] print(*walking_paths) show_df(df_defog_sum, 5, False) # ========================= # 3D scatter plot with plotly # ========================= import plotly.express as px fig = px.scatter_3d( df_defog_sum, x="StartHesitation", y="Turn", z="Walking", symbol="filename" ) fig.show() def show_defog(df): # Create a figure with 4 rows x 1 column of subplots fig, axs = plt.subplots(4, 1, figsize=(30, 10)) # Subplot 1: AccV axs[0].plot(df.index / 10, df.AccV) axs[0].set_ylabel("AccV[g] -Vertical-") # Subplot 2: AccML axs[1].plot(df.index / 10, df.AccML) axs[1].set_ylabel("AccML[g] -RightLeft-") # Subplot 3: AccAP axs[2].plot(df.index / 10, df.AccAP) axs[2].set_ylabel("AccAP[g] -ForwardBack-") # Subplot 4: Freezing flags axs[3].plot(df.index / 10, df.StartHesitation, label="StartHesitation") axs[3].plot(df.index / 10, df.Turn, label="Turn") axs[3].plot(df.index / 10, df.Walking, label="Walking") axs[3].set_ylabel("freezing_flag") axs[3].set_xlabel("time[sec] -defog is 100Hz sampling-") axs[3].legend() # Show the figure plt.show() for path in starthesitation_paths: print("-" * 80) print("StartHesitation") print("-" * 80) tmp = pd.read_csv( f"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/{path}.csv" ) print(f"path: {path}") show_defog(tmp) for path in turn_paths: print("-" * 80) print("Turn") print("-" * 80) tmp = pd.read_csv( f"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/{path}.csv" ) print(f"path: {path}") show_defog(tmp) for path in walking_paths: print("-" * 80) print("Walking") print("-" * 80) tmp = pd.read_csv(
f"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/{path}.csv" ) print(f"path: {path}") show_defog(tmp) # # 2. tdcsfog -laboratory test- df_list = [] for idx, path in tqdm(enumerate(tdcsfog_path)): df = pl.read_csv(path) filename = os.path.basename(path).split(".cs")[0] tmp = pl.DataFrame( { "idx": [idx] * len(df), "filename": [filename] * len(df), } ) df = pl.concat([df, tmp], how="horizontal") df_list.append(df) df_tdcsfog = pl.concat(df_list) show_df(df_tdcsfog) print("-" * 80) print("Unique CSV files") print("-" * 80) print(f"num of unique csv files(defog) : {len(defog_path)}") print(f"num of unique csv files(tdcsfog): {len(tdcsfog_path)}") print() print("-" * 80) print("Data Length") print("-" * 80) print(f"length of defog data : {len(df_defog)}") print(f"length of tdcsfog data : {len(df_tdcsfog)}") print() print(f"length of defog data is {len(df_defog)/len(df_tdcsfog):.2f} times larger!!") df_tdcsfog_sum = ( df_tdcsfog.select("filename", "StartHesitation", "Turn", "Walking") .groupby("filename") .sum() ) show_df(df_tdcsfog_sum)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/351/129351362.ipynb
null
null
[{"Id": 129351362, "ScriptId": 38437708, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5196442, "CreationDate": "05/13/2023 03:40:23", "VersionNumber": 2.0, "Title": "EDA-Parkinson-", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 113.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 39.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,931
0
1,931
1,931
129351051
<jupyter_start><jupyter_text>ETH hour The dataset consists of stock market data for the cryptocurrency Ethereum (ETH) from July 1st, 2017 at 11:00:00 AM until March 13th, 2020 at 8:00:00 PM. Contains 23674 rows and 9 columns Kaggle dataset identifier: ethhour <jupyter_script># # Time chart (ETH example) # This notebook is designed to teach you how to create time series plots and financial plots using Python. It covers different types of plots such as line charts, candlestick charts, and interactive plots. It also explains how to handle datetime variables and how to customize your plots. These skills are especially useful for those interested in finance, as they can be used to analyze and visualize financial data over time. By following the examples and exercises in this notebook, you will gain a solid understanding of how to create informative and visually appealing time series and financial plots. # Libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.graph_objects as go import datetime import matplotlib.dates as mdates # Import data import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) df = pd.read_csv("/kaggle/input/ethhour/ETH.csv", index_col=0) df.head() # convert date to datetime df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d %I-%p") # See day df["Day_week"] = df["Date"].dt.day_name() df.head() df.dtypes print(f"The total rows are {df.shape[0]} \nThe total columns are {df.shape[1]} ") # Range days print( f"The first date and hour is {df.Date.min()} \nThe last date and hour is {df.Date.max()} " ) # # 1. Time plots # Time series analysis is a way of analyzing data that is indexed by time. Python provides many powerful tools for working with time series data, including the pandas library and matplotlib for visualization. # ## 1.2 Line chart # A line chart, also known as a line plot or line graph, is a type of chart that displays data as a series of data points connected by straight lines. It is commonly used to show trends over time. Line charts are often used in finance, economics, and other fields to visualize time series data and to identify patterns and trends in the data. # ### 1.2.1 Single line chart # The chapter shows how to plot time series data for all available time intervals, including days, weeks, months, and years. # Create a line chart for the high price over time plt.figure(figsize=(10, 6)) # The x-axis displays the dates, and the y-axis displays high values. plt.plot(df["Date"], df["High"]) # Add axis labels and title plt.xlabel("Date") plt.ylabel("Price") plt.title("High values over time") # Display the plot plt.show() # This code is creating a line plot of the high values of a specific month (January 2020). 
plt.figure(figsize=(10, 6)) # Set the start and end dates of the month start_date = pd.to_datetime("2020-01-01", format="%Y-%m-%d") end_date = pd.to_datetime("2020-01-31", format="%Y-%m-%d") # Filter data for the month one_month = df.loc[(df["Date"] >= start_date) & (df["Date"] <= end_date)] # Plot the high values plt.plot(one_month["Date"], one_month["High"]) # Format the x-axis labels date_format = "%Y-%m-%d" plt.xticks(rotation=45, ha="right") plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(date_format)) # Add axis labels and title plt.xlabel("Date") plt.ylabel("Price") plt.title("High values for January 2020") # Display the plot plt.show() # This code is creating a line plot of the high values of a specific week (01/10/2020 - 01/17/2020). plt.figure(figsize=(10, 6)) # This code is creating a line plot of the high values of a specific week. start_date = pd.to_datetime("2020-01-10", format="%Y-%m-%d") end_date = pd.to_datetime("2020-01-17", format="%Y-%m-%d") # Filter data for one week one_week = df.loc[(df["Date"] >= start_date) & (df["Date"] <= end_date)] plt.plot(one_week["Date"], one_week["High"]) # axis in days date_format = "%Y-%m-%d" plt.xticks(rotation=45, ha="right") plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(date_format)) # Add axis labels and title plt.xlabel("Date") plt.ylabel("Price") plt.title("High values for the week of 01/10/2020 - 01/17/2020") # Display the plot plt.show() # This code is creating a line plot of the high values of a specific day (January 10th, 2020). plt.figure(figsize=(10, 6)) # Filter data for one day one_day = df.loc[df["Date"].dt.date == datetime.date(2020, 1, 10)] plt.plot(one_day["Date"], one_day["High"]) # axis in hours date_format = " %H:%M" plt.xticks(rotation=45, ha="right") plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(date_format)) # Add axis labels and title plt.xlabel("Hours") plt.ylabel("Price") plt.title("High values on 10/01/2020") # Display the plot plt.show() print(start_date) # ### 1.2.2 Group values by dates # The chapter shows how to group data by date in one step using the pandas resample() function. This is a powerful function for grouping time-series data as it can perform aggregation operations on the groups such as taking the mean, sum, or count of values. # To use the resample function, the Date column is set as the index df.set_index("Date", inplace=True) # this resample computes the daily max of the High column df.High.resample("D").max() # The common rule options in pandas resample method: # * 'D' for calendar day frequency # * 'W' for weekly frequency # * 'M' for month end frequency # * 'Q' for quarter end frequency # * 'A' for year end frequency # * 'H' for hourly frequency # * 'T' or 'min' for minute frequency # * 'S' for second frequency # * 'L' or 'ms' for millisecond frequency # * 'U' or 'us' for microsecond frequency # * 'N' for nanosecond frequency # You can combine a number with a frequency, for example, '5D' would represent a frequency of 5 calendar days. 
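# Added sketch (not from the original notebook) to make the multiplier syntax concrete:
# the rule string can be prefixed with a number, so "5D" bins the hourly data into
# 5-day windows and "12H" into 12-hour windows (assumes the Date index set above).
high_5d_max = df.High.resample("5D").max()
high_12h_mean = df.High.resample("12H").mean()
print(high_5d_max.head())
print(high_12h_mean.head())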
# Plot with resample fig, ax = plt.subplots(figsize=(10, 6)) # Resample data by day and get max high value for each day daily_max_high = df.High.resample("D").max() # Get the maximum value per week max_weekly = df.High.resample("W").max() # Get the maximum value per month max_monthly = df.High.resample("M").max() # Get the maximum value per quarter max_quarterly = df.High.resample("Q").max() # Plot maximum values per day daily_max_high.plot(ax=ax, label="Daily") # Plot maximum values per week max_weekly.plot(ax=ax, label="Weekly") # Plot maximum values per month max_monthly.plot(ax=ax, label="Monthly") # Plot maximum values per quarter max_quarterly.plot(ax=ax, label="Quarterly") # Set x-axis label plt.xlabel("Date") # Set y-axis label plt.ylabel("High Value") # Set title plt.title("Max values per Day, Week, Month, and Quarter") # show legends plt.legend() # Show plot plt.show() # The resample method can be combined with multiple aggregation functions, such as: # * mean(): calculates the mean of the values in each time bin # * sum(): calculates the sum of the values in each time bin # * max(): calculates the maximum value in each time bin # * min(): calculates the minimum value in each time bin # * std(): calculates the standard deviation of the values in each time bin # * count(): counts the number of values in each time bin # * first(): gets the first value in each time bin # * last(): gets the last value in each time bin # Plot with resample mean fig, ax = plt.subplots(figsize=(10, 6)) # Calculate the mean of daily high values daily_high = df.High.resample("D").mean() # Calculate the mean of weekly high values weekly_high = df.High.resample("W").mean() # Calculate the mean of monthly high values monthly_high = df.High.resample("M").mean() # Calculate the mean of quarterly high values quarterly_high = df.High.resample("Q").mean() # Plot the mean of daily, weekly, monthly, and quarterly high values plt.plot(daily_high.index, daily_high, label="Daily") plt.plot(weekly_high.index, weekly_high, label="Weekly") plt.plot(monthly_high.index, monthly_high, label="Monthly") plt.plot(quarterly_high.index, quarterly_high, label="Quarterly") # Set x-axis label plt.xlabel("Date") # Set y-axis label plt.ylabel("High Value") # Set title plt.title("Mean values per Day, Week, Month, and Quarter") # show legends plt.legend() # Show plot plt.show() # Plot with resample mean, max and min in one week fig, ax = plt.subplots(figsize=(10, 6)) one_week = df.loc["2020-01-10":"2020-01-17"] # Resample data to daily frequency one_day = one_week.resample("D") # Plot max, min, and mean values for each day plt.plot(one_day["High"].max(), label="Max") plt.plot(one_day["High"].min(), label="Min") plt.plot(one_day["High"].mean(), label="Mean") # Add axis labels and title plt.xlabel("Date") plt.ylabel("Price") plt.title("High values for the week of 01/10/2020 - 01/17/2020") # Add legend plt.legend() # Display the plot plt.show()
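# Added sketch: the introduction promises candlestick charts and plotly.graph_objects
# is imported above but never used in this excerpt. The snippet below builds daily
# candles from the hourly data; it assumes the CSV also carries Open, Low and Close
# columns (only Date and High are confirmed by the code above), so treat those
# column names as assumptions rather than part of the original notebook.
daily_ohlc = df.resample("D").agg(
    {"Open": "first", "High": "max", "Low": "min", "Close": "last"}
)
fig = go.Figure(
    data=[
        go.Candlestick(
            x=daily_ohlc.index,
            open=daily_ohlc["Open"],
            high=daily_ohlc["High"],
            low=daily_ohlc["Low"],
            close=daily_ohlc["Close"],
        )
    ]
)
fig.update_layout(title="Daily ETH candlestick (sketch)", xaxis_title="Date", yaxis_title="Price")
fig.show()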
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/351/129351051.ipynb
ethhour
camiloandresavila
[{"Id": 129351051, "ScriptId": 38457105, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5593777, "CreationDate": "05/13/2023 03:35:23", "VersionNumber": 6.0, "Title": "ETH_date_review", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 293.0, "LinesInsertedFromPrevious": 207.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 86.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185314848, "KernelVersionId": 129351051, "SourceDatasetVersionId": 5673240}]
[{"Id": 5673240, "DatasetId": 3261380, "DatasourceVersionId": 5748780, "CreatorUserId": 5593777, "LicenseName": "Unknown", "CreationDate": "05/13/2023 01:12:43", "VersionNumber": 1.0, "Title": "ETH hour", "Slug": "ethhour", "Subtitle": "This is a time series dataset with daily observations of the Ethereum-USD (ETHUS", "Description": "The dataset consists of stock market data for the cryptocurrency Ethereum (ETH) from July 1st, 2017 at 11:00:00 AM until March 13th, 2020 at 8:00:00 PM. \n\nContains 23674 rows and 9 columns", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3261380, "CreatorUserId": 5593777, "OwnerUserId": 5593777.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5673240.0, "CurrentDatasourceVersionId": 5748780.0, "ForumId": 3326975, "Type": 2, "CreationDate": "05/13/2023 01:12:43", "LastActivityDate": "05/13/2023", "TotalViews": 69, "TotalDownloads": 2, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 5593777, "UserName": "camiloandresavila", "DisplayName": "Camilo Andres Avila Carre\u00f1o", "RegisterDate": "08/08/2020", "PerformanceTier": 0}]
false
1
2,653
0
2,746
2,653
129351981
<jupyter_start><jupyter_text>Edmonton Neighborhood & Housing Data Kaggle dataset identifier: edmonton-neighborhood-and-housing-data <jupyter_script># ### Configuration and Importing Of Modules # Used for modification of data import numpy as np import pandas as pd # Graphing import plotly.express as px import plotly.graph_objects as go import plotly.io as pio import seaborn as sns import matplotlib.pyplot as plt # Config of pandas module and matplotlib in terms of displaying data pd.set_option("display.max_columns", 60) pd.set_option("display.max_rows", 60) # ## House Data Overview # Basic Information on our dataset which we will use house_data = pd.read_csv( "/kaggle/input/edmonton-neighborhood-and-housing-data/edmonton_housing_data_Feb15_2022.csv" ) # Fairly long dataset print(house_data.shape) # We will have to do lots of processing and feature engineering to be able to make this work well print(house_data.info()) # ## House Data Formatting and Removal # ##### Usually data exploration is conducted. However, I collected this data myself so I already have a good understanding of it and have worked with it in a different Juypter File # # Drop duplicate rows house_data = house_data.applymap(lambda s: s.lower().strip() if type(s) == str else s) house_data.drop_duplicates(inplace=True) # # Combine half baths and full baths into a float integer house_data["Half Baths"] = house_data["Half Baths"].replace(np.nan, 0) house_data["Full Baths"] = house_data["Full Baths"].replace(np.nan, 0) house_data["Bathrooms"] = house_data["Full Baths"] + (house_data["Half Baths"] / 2) # Combine Condo Fee + HOA Fee and format it correctly house_data["House Fee"] = pd.to_numeric( house_data["Condo Fee"].replace({np.nan: "0"}).str.replace("[$,]", "", regex=True) ) + pd.to_numeric( house_data["HOA Fees"].replace({np.nan: "0"}).str.replace("[$,]", "", regex=True) ) house_data["House Fee"] = house_data["House Fee"].replace({np.nan: 0}) # Check the house description for the substring "renov", if it is found then the house is rennovated. 
Binary formatting house_data["Renovated"] = house_data["House Description"].map( lambda x: 1 if "renov" in x else 0 ) # Translating yes/no into binary house_data["RE / Bank Owned"] = house_data["RE / Bank Owned"].replace( {"no": 0, "yes": 1} ) house_data["Fireplace"] = house_data["Fireplaces"].replace( {"no": 0, np.nan: 0, "^.*": 1}, regex=True ) house_data["Is Waterfront"] = house_data["Is Waterfront"].replace({"no": 0, "yes": 1}) house_data["Has Basement"] = house_data["Has Basement"].replace({"no": 0, "yes": 1}) house_data["Has Pool"] = house_data["Has Pool"].replace({"no": 0, "yes": 1}) house_data["Separate Entrance"] = house_data["Separate Entrance"].replace( {"no": 0, "yes": 1} ) # # Formatting into clean numbers house_data["Price"] = pd.to_numeric( house_data["Price"].str.replace("[$,]", "", regex=True) ) house_data["Square Footage"] = pd.to_numeric( house_data["Square Footage"].str.replace(",", "") ) house_data["# of Garages"] = pd.to_numeric( house_data["# of Garages"].replace({np.nan: 0, "4+": "", "9+": "9"}) ) house_data["# of Garages"] = house_data["# of Garages"].replace({np.nan: 0}) # Cleaning categorical data and making sure that categories don't overlap house_data["Style"] = house_data["Style"].replace( {"hillside bungalow": "bungalow", "raised bungalow": "bungalow"}, regex=True ) house_data["Construction"] = house_data["Construction"].replace( {"unknown": np.nan, "insulated concrete": "concrete", "see remarks": np.nan} ) house_data["Foundation"] = house_data["Foundation"].replace( { "concrete slab": "concrete", "see remarks": np.nan, "brick/stone/block": "block", "insulated concrete form": "concrete", "piling": "wood", "preserved wood": "wood", } ) # Dropping all columns based on use of information, relevancy and what improves model accuracy # Adjusted this a lot during the project to improve model accuracy house_data.drop( [ "Link", "House Description", "MLS® #", "Full Baths", "Half Baths", "Bedrooms Above Grade", "Address", "Area", "Condo", "City", "County", "Province", "Postal Code", "Features", "Parking Spaces", "Parking", "Interior Features", "Exterior Features", "Elementary", "Middle", "High", "Foreclosure", "Garages", "Zoning", "Lot Description", "HOA Fees", "Condo Fee", "HOA Fees Freq.", "Fireplaces", "Basement", "Exterior", "Interior", "Office", "Heating", ], axis=1, inplace=True, ) house_data.info() # All null values have been removed # ## Neighborhood Data Formatting and Removal # Merging of csv file containing neighborhood data with house dataframe on respective rows neigh_data = pd.read_csv( "/kaggle/input/edmonton-neighborhood-and-housing-data/neigh_data_Feb15_2023.csv" ) neigh_data = neigh_data.rename(columns={"neigh_name": "Community"}) neigh_house_data = pd.merge(house_data, neigh_data, on="Community") # Dropping more columns, again based on what I saw thourghout the model training neigh_house_data.drop( [ "alberta_rank", "percent_rank", "eng_only", "french_only", "Community", "ammenities_grade", ], inplace=True, axis=1, ) # Formatting of appended data, to convert into a number later neigh_house_data["edmonton_rank"] = neigh_house_data["edmonton_rank"].replace( "#", "", regex=True ) neigh_house_data["crime_rate"] = neigh_house_data["crime_rate"].replace( ",", "", regex=True ) neigh_house_data["median_income"] = neigh_house_data["median_income"].str.replace( "[$,]", "", regex=True ) neigh_house_data["in_labor_force"] = neigh_house_data["in_labor_force"].str.replace( "[%]", "", regex=True ) neigh_house_data["unemployment_rate"] = 
neigh_house_data["unemployment_rate"].replace( "%", "", regex=True ) neigh_house_data["median_house_val"] = neigh_house_data["median_house_val"].replace( "[$,]", "", regex=True ) neigh_house_data["home_owner_percent"] = neigh_house_data["home_owner_percent"].replace( "%", "", regex=True ) neigh_house_data["high_school_percent"] = neigh_house_data[ "high_school_percent" ].replace("%", "", regex=True) neigh_house_data["bach_degree"] = neigh_house_data["bach_degree"].replace( "%", "", regex=True ) neigh_house_data["area_pop"] = neigh_house_data["area_pop"].replace(",", "", regex=True) neigh_house_data["pop_dense"] = neigh_house_data["pop_dense"].replace( ",", "", regex=True ) neigh_house_data["marri_coup"] = neigh_house_data["marri_coup"].replace( "%", "", regex=True ) neigh_house_data["fam_w_kids"] = neigh_house_data["fam_w_kids"].replace( "%", "", regex=True ) neigh_house_data["test_scores"] = neigh_house_data["test_scores"].replace( "%", "", regex=True ) # LabelEndcoing (Converting category data into ) the more complex way in order to save time later # We will need to load up instances of it to convert user input from sklearn.preprocessing import LabelEncoder from collections import defaultdict labelencoder = LabelEncoder() tempDf = neigh_house_data[ ["Type", "Sub-Type", "Style", "Construction", "Foundation", "male_to_fem"] ] lbn = defaultdict(LabelEncoder) # Apply label encoder to the entire dataframe tempDf = tempDf.apply(lambda x: lbn[x.name].fit_transform(x)) neigh_house_data["Type"] = tempDf["Type"] neigh_house_data["Sub-Type"] = tempDf["Sub-Type"] neigh_house_data["Style"] = tempDf["Style"] neigh_house_data["Construction"] = tempDf["Construction"] neigh_house_data["Foundation"] = tempDf["Foundation"] neigh_house_data["male_to_fem"] = tempDf["male_to_fem"] # Convert everything to a numeric value and ensure that we do not have any NA's left neigh_house_data.dropna(axis="rows", inplace=True) neigh_house_data = neigh_house_data.apply(pd.to_numeric) neigh_house_data.info() # Final check to remove useless features plt.figure(figsize=(30, 30)) correlationGraph = neigh_house_data.corr() sns.heatmap(correlationGraph, annot=True, cmap=plt.cm.PuBu, linewidths=0.5) plt.show() # ## Training Models from sklearn.model_selection import train_test_split # Seperating x's from the y-val (dependent) x = neigh_house_data.drop("Price", axis=1) y = neigh_house_data["Price"] # Spliting up data for training and testing x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.1, random_state=42 ) from sklearn.linear_model import * # Linear Regression lr = LinearRegression() lr.fit(x_train, y_train) # Everything below this, is esentially just different forms of linear regression # Ridge Regression ridge = Ridge(alpha=0.5) ridge.fit(x_train, y_train) # Lasso Regression lasso = Lasso(alpha=0.01) lasso.fit(x_train, y_train) # Bayesian Regression bayesian = BayesianRidge() bayesian.fit(x_train, y_train) # ElasticNet Regression en = ElasticNet(alpha=0.01) en.fit(x_train, y_train) # Printing model accuracies print(f"Linear Regression Score: {lr.score(x_test, y_test)}") print(f"Ridge Regression Score: {ridge.score(x_test, y_test)}") print(f"Lasso Regression Score: {lasso.score(x_test, y_test)}") print(f"Bayesian Regression Score: {bayesian.score(x_test, y_test)}") print(f"Elastic Net Regression{en.score(x_test, y_test)}") # We can do better then that! 
(Hopefully) from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV from sklearn.metrics import mean_absolute_percentage_error # This is similar to the GradientBoostingRegressor, except it builds multiple decision trees and # averages their results. I am again using GridSearchCV to find the best parameters to tune this algorithm param_grid = { "n_estimators": [200, 500], "max_features": ["auto", "sqrt", "log2"], "max_depth": [4, 5, 6, 7, 8], } CV_rfc = GridSearchCV(estimator=RandomForestRegressor(), param_grid=param_grid, cv=5) CV_rfc.fit(x_train, y_train) print(CV_rfc.best_params_) rf = RandomForestRegressor(max_depth=8, n_estimators=200, max_features="sqrt").fit( x_train, y_train ) print(rf.score(x_test, y_test)) print(mean_absolute_percentage_error(y_test, rf.predict(x_test))) from sklearn import ensemble from sklearn.model_selection import GridSearchCV # GradientBoostingRegressor is the next model I tried. It uses decision trees in a combination with regressor algorithms to build # off of previous models and improve them. I used GridSearchCV, in order to find the best "tuning" parameters for the algorithm parameters = { "learning_rate": [0.01, 0.001, 0.0001], "subsample": [ 0.9, 0.5, 0.2, ], "n_estimators": [100, 500, 1000], "max_depth": [4, 8], } grid_GBR = GridSearchCV( estimator=ensemble.GradientBoostingRegressor(), param_grid=parameters, n_jobs=-1, cv=2, ) grid_GBR.fit(x_train, y_train) print(grid_GBR.best_params_) gbr = ensemble.GradientBoostingRegressor( n_estimators=1000, max_depth=8, learning_rate=0.01, subsample=0.5 ) gbr.fit(x_train, y_train) print(f"The accuracy of GBR is: {gbr.score(x_test,y_test)}") print( f"The mean absolute percentage error is: {mean_absolute_percentage_error(y_test, gbr.predict(x_test))}" ) import xgboost as xg xgb1 = xg.XGBRegressor() # AKA extremeGradientBoosting, a faster and more regularized version (meaning it may have a higher ability to generalize, # which can be good for accuracy). # It is essentially a version of regular gradient boosting; gave it a shot to see if I could increase accuracy even slightly parameters = { "nthread": [4], "objective": ["reg:linear"], "learning_rate": [0.03, 0.05, 0.07], "gamma": [1.5, 2, 6], "max_depth": [5, 6, 7], "min_child_weight": [4], "subsample": [0.7, 0.5], "colsample_bytree": [0.7, 0.2], "n_estimators": [500, 1000], } xgb_grid = GridSearchCV(xgb1, parameters, cv=2, n_jobs=5, verbose=True) xgb_grid.fit(x_train, y_train) print(xgb_grid.best_params_) XGB = xg.XGBRegressor( **{ "colsample_bytree": 0.7, "gamma": 1.5, "learning_rate": 0.03, "max_depth": 6, "min_child_weight": 4, "n_estimators": 500, "nthread": 8, "objective": "reg:linear", "subsample": 0.5, } ).fit(x_train, y_train) print(f"XGB has an accuracy score of: {XGB.score(x_test, y_test)}") print( f"XGB has a percentage error of: {mean_absolute_percentage_error(y_test, XGB.predict(x_test))}" ) # ## Saving Models and Encoders import joblib import pickle # Saving our most accurate model, GBR joblib.dump(gbr, "House_GBR_Feb15_2023.pkl") gbr_from_joblib = joblib.load("House_GBR_Feb15_2023.pkl") # Saving our encoders so we can use them later pickle.dump(lbn, open("label_encoder_Feb15_2023.pkl", "wb"))
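# Added sketch (not part of the original notebook): a quick sanity check on the
# saved model. The fitted GradientBoostingRegressor exposes feature_importances_,
# so the predictors it leans on can be ranked using only objects defined above
# (gbr, the feature frame x, pd and plt).
importances = pd.Series(gbr.feature_importances_, index=x.columns).sort_values(
    ascending=False
)
print(importances.head(10))
# Optional bar plot of the top 10 drivers of price
importances.head(10).plot(
    kind="barh", figsize=(8, 5), title="Top 10 GBR feature importances"
)
plt.gca().invert_yaxis()
plt.show()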
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/351/129351981.ipynb
edmonton-neighborhood-and-housing-data
dilshaansandhu
[{"Id": 129351981, "ScriptId": 38460154, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13467280, "CreationDate": "05/13/2023 03:50:30", "VersionNumber": 1.0, "Title": "Edmonton Real Estate Prediction Model Training", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 324.0, "LinesInsertedFromPrevious": 324.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185316496, "KernelVersionId": 129351981, "SourceDatasetVersionId": 5673825}]
[{"Id": 5673825, "DatasetId": 3261644, "DatasourceVersionId": 5749368, "CreatorUserId": 13467280, "LicenseName": "Unknown", "CreationDate": "05/13/2023 03:38:12", "VersionNumber": 1.0, "Title": "Edmonton Neighborhood & Housing Data", "Slug": "edmonton-neighborhood-and-housing-data", "Subtitle": "A comprehensive dataset of residential property in the city of edmonton", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3261644, "CreatorUserId": 13467280, "OwnerUserId": 13467280.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5673895.0, "CurrentDatasourceVersionId": 5749438.0, "ForumId": 3327239, "Type": 2, "CreationDate": "05/13/2023 03:38:12", "LastActivityDate": "05/13/2023", "TotalViews": 317, "TotalDownloads": 40, "TotalVotes": 5, "TotalKernels": 1}]
[{"Id": 13467280, "UserName": "dilshaansandhu", "DisplayName": "Dilshaan Sandhu", "RegisterDate": "01/30/2023", "PerformanceTier": 0}]
false
2
4,252
2
4,287
4,252
129326349
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. # # Data visualization # In the era of data-driven decision-making, effective data visualization has become a crucial tool for communicating insights and trends to diverse audiences. However, with an array of chart types available, selecting the most appropriate one for a particular dataset can be a daunting challenge. Numerous factors such as the nature of the data, the intended audience, and the objective of the visual display must be carefully considered. # This article aims to provide a comprehensive guide to selecting the optimal chart for your data, based on extensive research and best practices. Whether you are a novice or a seasoned data analyst, this guide will equip you with the necessary skills to create compelling and informative visualizations that effectively convey your message. Please note that this article focuses solely on chart selection and does not include any code. Without further ado, let us delve into the intricacies of chart selection. # # Table of contents # 1. [Comparing data](#1) # 1. Bar Chart # 1. Column Chart # 1. Stacked Chart # 1. Radar Chart # 2. [Composition and Components](#2) # 1. Pie Chart # 1. Doughnut Chart # 1. Pyramid Chart # 1. Treemap Chart # 1. Funnel Chart # 3. [Tracking data over time](#3) # 1. Line Chart # 1. Spline Chart # 1. Area Chart # 1. Candlestick Chart # 1. OHLC Chart # 1. Sparkline Chart # 4. [Analyzing distribution](#4) # 1. Scatter Chart # 1. Bubble Chart # 1. Box Chart # 1. Error Chart # 1. Heatmap # 1. Range Chart # 1. Polar Chart # 5. [Gauging Performance](#5) # 1. Circular Gauge # 1. Linear Gauge # 1. Bullet Chart # 6. [Project Data](#6) # 1. Gantt Chart # 7. [Geographical Data](#7) # 1. Choropleth Map # 1. Dot Map # 1. Bubble Map # 1. Flow Map # # Comparing data # ## Bar Chart and Column Chart # Bar and column charts are highly effective when visualizing data that involves only one variable. For example, if we want to compare the sales performance of different employees, bar and column charts are ideal. # # [Top](#0) import numpy as np import matplotlib.pyplot as plt sales = np.random.randint(3000, 10000, size=5) person = ["John", "Jake", "Peter", "Mary", "James"] # Create a figure and an axes. fig, ax = plt.subplots() # Plot the data on the axes as a bar chart. ax.barh(person, sales, color="blue") # Set the x-axis and y-axis labels. ax.set_xlabel("Sales") ax.set_ylabel("Person") # Add a title to the graph. ax.set_title("Bar chart of sales") # Show the plot. plt.show() import numpy as np import matplotlib.pyplot as plt sales = np.random.randint(3000, 10000, size=5) person = ["John", "Jake", "Peter", "Mary", "James"] # Create a figure and an axes. fig, ax = plt.subplots() # Plot the data on the axes. ax.bar(person, sales) # Set the x-axis and y-axis labels. ax.set_xlabel("Sales") ax.set_ylabel("Person") # Add a title to the graph. ax.set_title("Column chart of sales") # Show the plot. plt.show() # # The main difference between these two chart types is their orientation: bar charts are horizontal, while column charts are vertical. However, this difference is merely a matter of presentation and does not affect their function. 
In fact, bar and column charts are interchangeable and can be used to represent the same data. Regardless of their orientation, both chart types allow for clear and easy comparison of values, making them an excellent choice for analyzing and presenting data with a single variable. # ## Stacked chart and Grouped Bar chart # A stacked chart is a bar chart where each bar is divided into multiple segments, each representing a different category. Stacked charts are used to compare the relative contributions of different categories to a total. For example, a stacked bar chart could be used to compare the sales of different products in a company. Each bar would represent the total sales for a product, and the segments within the bar would represent the sales for each of the company's sales channels. Stacked charts provides a way to provide each categories as well as the sub-categories. # Grouped Bar chart is similar to stacked chart but provides a way to compare each sub-categories instead of the main categories. import matplotlib.pyplot as plt import numpy as np # Generate some random data categories = ["Store A", "Store B", "Store C", "Store D"] data = np.random.rand(4, 3) # Create a stacked bar chart fig, ax = plt.subplots() ax.bar(categories, data[:, 0], label="Main clothes") ax.bar(categories, data[:, 1], bottom=data[:, 0], label="Overcoats") ax.bar(categories, data[:, 2], bottom=data[:, 0] + data[:, 1], label="Accessories") # Add legend and labels ax.legend() ax.set_xlabel("Stores") ax.set_ylabel("Values") ax.set_title("Stacked Bar Chart") # Set the width of each bar bar_width = 0.2 # Set the positions of the bars on the x-axis r1 = np.arange(len(categories)) r2 = [x + bar_width for x in r1] r3 = [x + bar_width for x in r2] # Get group bar series_1 = [sub_array[0] for sub_array in data] series_2 = [sub_array[1] for sub_array in data] series_3 = [sub_array[2] for sub_array in data] # Create the grouped bar chart fig, ax = plt.subplots() ax.bar(r1, series_1, width=bar_width, label="Main clothes") ax.bar(r2, series_2, width=bar_width, label="Overcoats") ax.bar(r3, series_3, width=bar_width, label="Accessories") # Add legend and labels ax.set_xticks([r + bar_width for r in range(len(categories))]) ax.set_xticklabels(categories) ax.legend() ax.set_title("Grouped Column Chart") plt.show() # ## Radial chart # A radar chart, also known as a spider chart or a star chart, is a graphical method of displaying multivariate data in the form of a two-dimensional chart. In a radar chart, each variable is represented by a spoke or axis radiating from the center of the chart, and the values of each variable are plotted as a point or a line around the spokes. # It allows one to one comparison of various aspect of two or more items. It is used in fields like market research, sports, and social sciences. For example, if you want to compare the different attributes of two footballers or the share of each departments. 
import numpy as np import matplotlib.pyplot as plt # Generate random data for two companies and 8 variables companies = ["Company A", "Company B"] departments = [ "Sales", "Marketing", "HR", "Research", "Customer support", "Admin", "IT", "Legal", ] data = np.random.rand(2, 8) # Create a function to plot the radar chart def plot_radar_chart(data, variables, companies): # Calculate the angle for each variable angles = np.linspace(0, 2 * np.pi, len(variables), endpoint=False) angles = np.concatenate((angles, [angles[0]])) # Create the plot and set the axis labels and limits fig = plt.figure() ax = fig.add_subplot(111, polar=True) ax.set_theta_offset(np.deg2rad(90)) # Rotate plot by 90 degrees ax.set_theta_direction(-1) # Reverse the direction of the plot ax.set_xticks(angles[:-1]) # Place one tick per variable before labelling them ax.set_xticklabels(variables) ax.set_ylim(0, 1) # Plot each company's data as a line for i in range(len(companies)): values = data[i, :] values = np.concatenate((values, [values[0]])) ax.plot(angles, values, label=companies[i]) # Add a legend and a title ax.legend(loc="upper right", bbox_to_anchor=(1.3, 1.1)) plt.title("Company Comparison") # Show the plot plt.show() # Call the function to plot the radar chart plot_radar_chart(data, departments, companies) # # # Composition and Components # These charts compare a part of the data to the whole. # ## Pie chart # A pie chart assumes the parts add up to 100% and simply compares their shares. import numpy as np import matplotlib.pyplot as plt # Define data for 5 categories categories = ["Category 1", "Category 2", "Category 3", "Category 4", "Category 5"] data = [30, 32, 55, 15, 50] # Create a pie chart plt.pie(data, labels=categories, autopct="%1.1f%%") # Add a title plt.title("Pie Chart") # Show the plot plt.show() # One of the main criticisms of pie charts is that they rely on area, which makes them difficult to read (especially when there are many categories) and hard to compare across multiple pie charts. As a rule of thumb, it is usually better to compare data by using a bar chart. # Also, when values are too close, it's hard to tell which is greater or whether there is any difference at all. For example, Category 1 and Category 2, or Category 3 and Category 5. # ## Doughnut chart import numpy as np import matplotlib.pyplot as plt # Generate random data for 5 categories categories = ["Category 1", "Category 2", "Category 3", "Category 4", "Category 5"] data = np.random.rand(5) * 1000 # Calculate the total sales total_sales = sum(data) # Create a donut chart fig, ax = plt.subplots() ax.pie( data, labels=categories, autopct="%1.1f%%", wedgeprops=dict(width=0.5), startangle=-40, ) # Add a white circle in the center of the chart circle = plt.Circle((0, 0), 0.5, color="white") fig.gca().add_artist(circle) # Add a title with the total sales value plt.text(0, 0, f"Total = {total_sales:.0f}", ha="center", va="center", fontsize=14) # Add a title to the chart plt.title("Donut Chart") # Show the plot plt.show() # This is a pie chart with the centre cut out, and it has several advantages over a pie chart: # * It is a more accurate depiction, because the reader judges arc length along the circumference rather than area # * It is cleaner and more readable # * It is more space efficient, as the centre can be used for additional information (such as the total shown above) # ## Pyramid chart # In order to display a simple hierarchy, use a pyramid chart. It stacks values on top of each other, from least to most or vice versa.
import matplotlib.pyplot as plt import numpy as np # Define the data labels = [ "Website visit", "Product viewed", "Interest showed", "Purchased", "Return customers", ] values = [50, 45, 35, 25, 10] # values = [10, 25, 35, 45, 60] # Set up the figure and axes fig, ax = plt.subplots() # Plot the left half of the pyramid (mirrored to the negative side) ax.barh(labels, -np.array(values), align="center", height=0.5, color="red") # Plot the right half of the pyramid ax.barh(labels, np.array(values), align="center", height=0.5, color="red") # Set the limits and labels ax.set_xlim([-max(values), max(values)]) ax.set_xlabel("Users") ax.set_title("Pyramid Chart") # Show the plot plt.show() # ## Tree maps # A treemap compares many values at once by using nested rectangles. The size of each rectangle indicates the relative size of its value, which makes it easy to spot patterns in treemap charts. import matplotlib.pyplot as plt import squarify import random def incrementing_array(size, initial_value=1): return [initial_value + random.randint(i, i**2) for i in range(size)] def random_hex_color(): hex_digits = "89ABCDEF" color_code = "#" + "".join([random.choice(hex_digits) for _ in range(6)]) return color_code # Define the labels labels = ["Others", "Hamsters", "G. pigs", "Rats", "Mice", "Rabbits", "Cats", "Dogs"] # Define the data sizes = incrementing_array(len(labels)) # Define the colors colors = [random_hex_color() for i in range(len(labels))] # Create the tree map squarify.plot(sizes=sizes, label=labels, color=colors, alpha=0.7) # Add a title plt.title("Pet shops") # Remove the axis ticks and labels plt.axis("off") # Show the plot plt.show()
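# As noted in the pie chart section above, values that are close together (for example Category 1 vs Category 2) are easier to compare with a bar chart. A minimal sketch, reusing the same five hard-coded categories and values from that section, to illustrate the comparison:
import matplotlib.pyplot as plt

categories = ["Category 1", "Category 2", "Category 3", "Category 4", "Category 5"]
data = [30, 32, 55, 15, 50]

# The same composition drawn as a horizontal bar chart: small differences
# (e.g. 30 vs 32) show up as a visible difference in bar length.
fig, ax = plt.subplots()
ax.barh(categories, data)
ax.set_xlabel("Value")
ax.set_title("Same data as a bar chart")
plt.show()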
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326349.ipynb
null
null
[{"Id": 129326349, "ScriptId": 2293839, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1660835, "CreationDate": "05/12/2023 19:58:31", "VersionNumber": 20.0, "Title": "Guide to Choosing the Best Chart for Your Data", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 506.0, "LinesInsertedFromPrevious": 251.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 255.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,495
0
3,495
3,495
129326630
# Bag-of-words counts for two short Indonesian tweets from sklearn.feature_extraction.text import CountVectorizer tweet1 = "Hari ini cuaca cerah" tweet2 = "Besok cuaca hujan" cv = CountVectorizer() bow = cv.fit_transform([tweet1, tweet2]) print(bow.toarray()) # TF-IDF weights for the word "partai" across three tweets from sklearn.feature_extraction.text import TfidfVectorizer tweet1 = "Partai kami siap memenangkan pemilihan ini!" tweet2 = "Tidak ada kata menyerah dalam partai kami." tweet3 = "Partai politik harus mampu menjadi penyeimbang kebijakan pemerintah." tfidf = TfidfVectorizer() tfidf_vector = tfidf.fit_transform([tweet1, tweet2, tweet3]) partai_index = tfidf.vocabulary_["partai"] print("Nilai TF-IDF kata 'partai' pada setiap tweet:") for i in range(tfidf_vector.shape[0]): print("Tweet", i + 1, ":", tfidf_vector[i, partai_index]) # Word embeddings from the pretrained Google News word2vec model. # Note: this is an English model, so Indonesian words may be missing from its vocabulary. import gensim.downloader as api model = api.load("word2vec-google-news-300") word1 = "politikus" word2 = "pemimpin" vector1 = model[word1] vector2 = model[word2] similarity = model.similarity(word1, word2) print( "Cosine similarity antara kata '", word1, "' dan '", word2, "' adalah", similarity ) print("Vektor word embedding untuk kata '", word1, "':", vector1) print("Vektor word embedding untuk kata '", word2, "':", vector2) # Sentiment polarity with TextBlob. # Note: TextBlob's default analyzer is trained on English text, so scores for Indonesian sentences may not be meaningful. from textblob import TextBlob kalimat_bahagia = "Saya sangat bahagia hari ini!" kalimat_marah = "Saya sangat marah karena ujian dibatalkan!" blob_bahagia = TextBlob(kalimat_bahagia) blob_marah = TextBlob(kalimat_marah) sentimen_bahagia = blob_bahagia.sentiment.polarity sentimen_marah = blob_marah.sentiment.polarity print("Sentimen dari kalimat '", kalimat_bahagia, "' adalah", sentimen_bahagia) print("Sentimen dari kalimat '", kalimat_marah, "' adalah", sentimen_marah) # Import the required library import tweepy # Twitter API configuration consumer_key = "YOUR_CONSUMER_KEY" consumer_secret = "YOUR_CONSUMER_SECRET" access_token = "YOUR_ACCESS_TOKEN" access_token_secret = "YOUR_ACCESS_TOKEN_SECRET" # Authenticate with the Twitter API auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) # Create an API object with the authentication api = tweepy.API(auth) # Search for tweets with the hashtag #lampung query_hashtag = "#lampung" tweets_hashtag = api.search_tweets(q=query_hashtag) # Search for tweets that mention @bima query_mention = "@bima" tweets_mention = api.search_tweets(q=query_mention) # Display the search results print("Tweet dengan hashtag", query_hashtag) for tweet in tweets_hashtag: print(tweet.text) print("\nTweet yang mem-mention", query_mention) for tweet in tweets_mention: print(tweet.text)
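# The word2vec example above assumes both words exist in the pretrained vocabulary. A minimal, hedged sketch of a lookup guard for gensim KeyedVectors (it reuses the `model` object loaded above); words missing from the English Google News vocabulary are reported instead of raising a KeyError:
def safe_similarity(model, word1, word2):
    # KeyedVectors supports membership tests, so check the vocabulary first.
    if word1 in model and word2 in model:
        return model.similarity(word1, word2)
    return None  # at least one word is out of vocabulary


for pair in [("politikus", "pemimpin"), ("king", "queen")]:
    score = safe_similarity(model, *pair)
    print(pair, "->", score if score is not None else "not in vocabulary")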
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326630.ipynb
null
null
[{"Id": 129326630, "ScriptId": 38446890, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14545408, "CreationDate": "05/12/2023 20:02:37", "VersionNumber": 1.0, "Title": "notebookee0e9ad260", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
902
0
902
902
129326221
num = int(input("enter a num")) sum = 0 for i in range(1, num): if num % i == 0: sum += i if sum == num: print("The number is a perfect number") else: print("The number is not a perfect number") a = input("Enter a string is:") rev = a[::-1] if rev == a: print("The string is palindrome") else: print("The string is not palindrome") num = int(input("Enter a number")) sum = 0 for i in range(1, num): if num % i == 0: sum += i if sum == num: print("The number is perfect") else: print("The numbers is not perfect") # Let's take the input of the string a = input("Enter a string") b = input("Enter a string to remove") re = a.replace(b, "") print(re) string1 = input("Enter a string") string2 = input("Enter a string") index = string1.rfind(string2) if index == -1: print("Index level is not found") else: print("Index level is:", index) num = input("Enter a number") n = len(num) sum = 0 for i in num: sum += int(i) ** n if sum == int(num): print("The num is armstronge") else: print("The num is not armstronge") a = int(input("Enter a string")) b = int(input("Enter a in 2nd string")) a = a + b b = a - b a = a - b print("After swapping of two numbers") print(a) print(b) num = int(input("Enter a number")) sum = 0 for i in range(1, num): sum += i if sum == num: print("Say the number is perfect") else: print("The number is not perfect") string = input("Enter a string") string1 = input("Enter a sub string") rev = string.rfind(string1) if rev == -1: print("The index is not found") else: print("The index lvl is", rev) num = input("Enter a number") n = len(num) sum = 0 for i in num: sum += int(i) ** n if sum == int(num): print("The number is armstrong") else: print("The number is not armstrong") num1 = int(input("Enter a first number")) num2 = int(input("Enter b number")) if num1 > num2: num1, num2 = num2, num1 for i in range(num2, num1 - 1, -1): if i % 2 == 0: print("The biggest even number is :", num1, "and", num2, "is", i) break num1 = int(input("Enter a number")) num2 = int(input("Enter b number")) if num1 > num2: num1, num2 = num2, num1 for i in range(num2, num1 - 1, -1): if i % 2 == 0: print("The biggest even number is", num1, "and", num2, "is", i) break n = int(input("Enter a number")) harmonic_sum = 0 for i in range(1, n): harmonic_sum += 1 / i print("The Harmonic sum of", n - 1, "is", harmonic_sum) n = int(input("Enter a number is :")) har_sum = 0 for i in range(1, n): har_sum += 1 / i print("The har_sum of n is", n - 1, "is", har_sum) # Please ensure the platform IDE is in Python 3.x mode. num = input("Enter a number: ") steps = 0 while len(num) > 1: num = str(sum(int(d) for d in num)) steps += 1 print("Step-{} Sum: {}".format(steps, num)) import math def is_prime(n): """ This function checks whether a given number is prime or not. 
""" if n <= 1: return False for i in range(2, int(math.sqrt(n)) + 1): if n % i == 0: return False return True passage = input("Enter the passage: ") words = passage.split() prime_words = [] for word in words: if is_prime(len(word)): prime_words.append(word) print("Prime words in the passage are: ") for word in prime_words: print(word) import matplotlib.pyplot as plt import numpy as np x = np.arange(0, 10, 0.5) y = np.sin(x) plt.figure(figsize=(5, 2)) plt.title("Sine Curve") plt.subplot(121) plt.plot(x, y) plt.show() x = np.arange(0, 10, 0.5) y = np.cos(x) plt.figure(figsize=(5, 2)) plt.title("By cose") plt.subplot(122) plt.plot(x, y) plt.show() import datetime as datetime now = datetime.datetime.now() print(now) my_date = datetime.date(2020, 3, 23) print(my_date) my_time = datetime.time(12, 5, 9) print(my_time) na = datetime.datetime(2022, 10, 14, 12, 30, 25) print(na) from bs4 import BeautifulSoup html_doc = """ <html> <head> <title>My Web Page</title> </head> <body> <h1>Welcome to my Web Page</h1> <p>This is some text.</p> <ul> <li>Item 1</li> <li>Item 2</li> <li>Item 3</li> </ul> </body> </html> """ soup = BeautifulSoup(html_doc, "html.parser") print(soup.prettify()) soup = BeautifulSoup(html_doc, "html.parser") h1_tag = soup.find("p") print(h1_tag.text) import pandas as pd import numpy as np nn = pd.DataFrame({"name": ["Alice", "mois", "moin"], "age": [12, 35, 56]}) nn arr = np.array([1, 2, 3, 4, 5]) arr_sqrt = arr**2 print(arr_sqrt) arr_sqrt.mean() np.arange(10) nazar = np.arange(10) mm = nazar.reshape(2, 5) print(mm) master = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) hh = np.transpose(master) print(hh) import numpy as np import pandas as pd arr = np.array([1, 2, 3, 4, 5]) arr arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] nn = np.transpose(arr) nn ar = np.array([[9, 8, 0], [7, 3, 5]]) arr = ar.flatten() print(arr) arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) row = arr2d[1, 0:2] print(row) arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) n = arr[0, :, :] print(n) nn = np.array([[1, 2, 3], [4, 5, 6], [8, 9, 10]]) t = nn[1, 0] print(t) import math def is_prime(n): if n < 2: return False for i in range(2, int(math.sqrt(n)) + 1): if n % i == 0: return False return True def find_prime_length_words(sentence): words = sentence.split() prime_length_words = [] for word in words: if is_prime(len(word)): prime_length_words.append(word) return prime_length_words # Example usage input_sentence = "The quick brown fox jumps over the lazy dog." output_words = find_prime_length_words(input_sentence) print("Output:", " ".join(output_words)) import math def is_prime(n): if n < 2: return False for i in range(2, int(math.sqrt(n)) + 1): if n % i == 0: return False return True def find_prime_length_words(sentence): words = sentence.split() for word in words: if is_prime(len(word)): prime_length_words.append(words) return prime_length_words # Example usage input_sentence = "The quick brown fox jumps over the lazy dog" output_words = find_prime_length_words(input_sentence) print("output:", "".join(output_words)) def print_poem(poem): lines = poem.split("\n") for line in lines: line = line.strip() if line: print("\t" + line) print("\t\t" + "\t".join(line.split())) # Example usage input_poem = """Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. Twinkle, twinkle, little star, How I wonder what you are!""" print_poem(input_poem) poem = "Twinkle, twinkle, little star, How I wonder what you are! 
Up above the world so high, Like a diamond in the sky. Twinkle, twinkle, little star, How I wonder what you are!" def poem_print(poem): lines = poem.split("\n") for line in lines: line = line.strip() if line: print("\t" + line) print("\t\t" + "\t".join(line.strip())) input_poem = "Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond in the sky. Twinkle, twinkle, little star, How I wonder what you are!" poem_print(input_poem) def send_email_greeting(first_name, last_name): greeting = "Hello " + last_name + " " + first_name return greeting # Example usage first_name = "Dany" last_name = "Boon" email_greeting = send_email_greeting(first_name, last_name) print(email_greeting) def master(first, last): m = "Hello " + last + " " + first return m first = "Nazar" last = "Mohammed" email = master(first, last) print(email) r = 6 a = 4 / 3 * 3.142 * r**3 a def volume(r): a = 4 / 3 * 3.1415926535897931 * r**3 return a volume(6) import math def calculate_sphere_volume(radius): volume = (4 / 3) * math.pi * radius**3 return volume radius = 6.0 sphere_volume = calculate_sphere_volume(radius) print(sphere_volume) import math def calc(r): volume = (4 / 3) * math.pi * radius**3 return volume r = 6 sphere = calc(r) print(sphere) import math def nazar(r): v = (4 / 3) * math.pi * r**3 return v tt = 6.0 v = nazar(tt) print(v) def pp(x, y): a = (x + y) * (x + y) return a x = int(input()) y = int(input()) nn = pp(x, y) print(nn) def cac(a, b, c): nn = a + b + c return nn mm = cac(1, 2, -8) print(mm) def cc(lst): total = sum(lst) return total lst = [1, 2, -8] result = cc(lst) print(result) def multi(lst): result = 1 for num in lst: result *= num return result input_list = [1, 2, -8] result = multi(input_list) print(result) n = [50] for i in n: if i % 2 == 0: print("It is a positive number") else: print("It is a negative number") def positive(n=50): for i in n: if i % 2 == 0: print("It is a positive number") else: print("It is a negative number") positive(n) num = float(input("Input a number: ")) if num > 0: print("It is positive number") elif num == 0: print("It is Zero") else: print("It is a negative number") number = float(input("Enter a number: ")) if number > 0: print("It is a positive number") elif number < 0: print("It is a negative number") else: print("It is zero") num = float(input("Enter a num is:")) if num > 0: print("It is a positive number") elif num < 0: print("It is a negative number") else: print("It is zero") number = [50] for i in number: if i % 2 == 0: print("positive") else: print("negative") import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt a = [1, 2, 3, 4, 5, 8, 3, 9, 10] b = [2, 8, 5, 4, 3, 1, 0, 9, 4] plt.figure(figsize=(5, 3)) plt.plot(a, b, color="red") plt.xlabel("x-axis") plt.ylabel("y-axis") plt.title("Plotting the Graph") plt.show() import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv("/kaggle/input/netflix-titles/netflix_titles.csv") df.head() df.isnull().sum() df1 = df.fillna(value="unknown") df1.isnull().sum() df1.columns # Print the column names with spaces visible print([col.strip() for col in df1.columns]) # Reset the index df1.reset_index(drop=True, inplace=True) # Print the column names print(df1.columns) # Count the number of movies and TV shows movie_count = df1[df1["type"] == "Movie"].shape[0] tv_show_count = df1[df1["type"] == "TV Show"].shape[0] # Create a bar chart labels = ["Movies", "TV Shows"] counts = [movie_count, 
tv_show_count] plt.bar(labels, counts) plt.xlabel("Content Type") plt.ylabel("Count") plt.title("Number of Movies vs TV Shows on Netflix") plt.show() import matplotlib.pyplot as plt import pandas as pd # Count the occurrence of each genre genre_counts = df1["listed_in"].value_counts().head(10) # Create a bar chart plt.bar(genre_counts.index, genre_counts.values) plt.xlabel("Genre") plt.ylabel("Count") plt.title("Top 10 Most Popular Genres on Netflix") plt.xticks(rotation=45, ha="right") # Rotate x-axis labels for better readability plt.tight_layout() # Adjust spacing plt.show() # Create a box plot plt.boxplot(df1["rating"]) plt.ylabel("Rating") plt.title("Distribution of Ratings on Netflix") plt.show() import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns df = sns.load_dataset("titanic") df.head() df.isnull().sum() df.mean() df1 = df.fillna(df.mean()) df1.isnull().sum() df1["sex"].unique() # Convert the data type of the Age column to numeric df1["age"] = pd.to_numeric(df1["age"], errors="coerce") # Verify the updated data type of the Age column print("Data type of the Age column:", df1["age"].dtype) # Create a box plot to visualize the distribution of age sns.boxplot(x="age", data=df1) plt.title("Distribution of Age") plt.show() # Create a scatter plot to visualize the relationship between age and survival sns.scatterplot(x="age", y="survived", hue="sex", data=df1) plt.title("Age vs. Survival") plt.show() # Create a bar chart to show the count of passengers by gender sns.countplot(x="sex", data=df1) plt.title("Passenger Count by Gender") plt.show() # Create a count plot to show the count of survivors by gender sns.countplot(x="survived", hue="sex", data=df1) plt.title("Survivor Count by Gender") plt.show() # Create a heatmap to visualize the relationship between age, gender, and survival age_gender_survival = df1.pivot_table(index="age", columns="sex", values="survived") sns.heatmap(age_gender_survival, cmap="coolwarm", annot=True, fmt=".0%", cbar=True) plt.title("Survival Rate by Age and Gender") plt.show() df = sns.load_dataset("iris") import seaborn as sns import matplotlib.pyplot as plt # Load the Iris dataset df = sns.load_dataset("iris") # Scatterplot: Sepal Length vs. Sepal Width sns.scatterplot(x="sepal_length", y="sepal_width", hue="species", data=df) plt.title("Scatterplot: Sepal Length vs. Sepal Width") plt.show() # Boxplot: Petal Length by Species sns.boxplot(x="species", y="petal_length", data=df) plt.title("Boxplot: Petal Length by Species") plt.show() # Violinplot: Petal Width by Species sns.violinplot(x="species", y="petal_width", data=df) plt.title("Violinplot: Petal Width by Species") plt.show() # Pairplot: Pairwise Relationships and Distributions sns.pairplot(df, hue="species") plt.title("Pairplot: Pairwise Relationships and Distributions") plt.show() # Heatmap: Correlation Matrix correlation = df.corr() sns.heatmap(correlation, annot=True, cmap="coolwarm") plt.title("Correlation Heatmap") plt.show()
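# Note: the correlation heatmap above calls df.corr() on a frame that still contains the string column "species"; on recent pandas versions this raises an error rather than silently dropping the column. A minimal sketch of a numeric-only variant, using the same iris dataset:
import seaborn as sns
import matplotlib.pyplot as plt

df = sns.load_dataset("iris")

# Restrict the correlation to numeric columns so the string "species" column is excluded.
correlation = df.select_dtypes("number").corr()
sns.heatmap(correlation, annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap (numeric columns only)")
plt.show()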
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326221.ipynb
null
null
[{"Id": 129326221, "ScriptId": 37853564, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12421240, "CreationDate": "05/12/2023 19:56:52", "VersionNumber": 1.0, "Title": "trend", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 1045.0, "LinesInsertedFromPrevious": 1045.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,887
0
4,887
4,887
129326444
<jupyter_start><jupyter_text>1000_companies_profit The dataset includes sample data of 1000 startup companies operating cost and their profit. Well-formatted dataset for building ML regression pipelines. **Includes** R&D Spend float64 Administration float64 Marketing Spend float64 State object Profit float64 Kaggle dataset identifier: 1000-companies-profit <jupyter_code>import pandas as pd df = pd.read_csv('1000-companies-profit/1000_Companies.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 1000 entries, 0 to 999 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 R&D Spend 1000 non-null float64 1 Administration 1000 non-null float64 2 Marketing Spend 1000 non-null float64 3 State 1000 non-null object 4 Profit 1000 non-null float64 dtypes: float64(4), object(1) memory usage: 39.2+ KB <jupyter_text>Examples: { "R&D Spend": 165349.2, "Administration": 136897.8, "Marketing Spend": 471784.1, "State": "New York", "Profit": 192261.83 } { "R&D Spend": 162597.7, "Administration": 151377.59, "Marketing Spend": 443898.53, "State": "California", "Profit": 191792.06 } { "R&D Spend": 153441.51, "Administration": 101145.55, "Marketing Spend": 407934.54, "State": "Florida", "Profit": 191050.39 } { "R&D Spend": 144372.41, "Administration": 118671.85, "Marketing Spend": 383199.62, "State": "New York", "Profit": 182901.99 } <jupyter_script># # Predicting Profit using Multiple Linear Regression Model based on R&D Spend, Administration, and Marketing Spend # The model I have created uses Linear Regression to predict the profit of a company based on its investment in Research and Development (R&D), Administration, and Marketing Spend. The dataset used to train the model contains information on these three variables and the corresponding profits earned by various companies. # By analyzing the data, the model has learned to identify the relationships between the input variables and the target variable (profit), and can use this knowledge to make predictions on new data. The model can be used to help businesses make informed decisions about their investments by providing a reliable estimate of the expected import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import sklearn # ## Loading Data df = pd.read_csv("/kaggle/input/1000-companies-profit/1000_Companies.csv") df.shape df.sample(10) df.isnull().sum() df.corr() plt.scatter(df["R&D Spend"], df["Profit"]) plt.xlabel("R&D Spend") plt.ylabel("Profit") plt.scatter(df["Administration"], df["Profit"]) plt.xlabel("Administration") plt.ylabel("Profit") plt.scatter(df["Marketing Spend"], df["Profit"]) plt.xlabel("Marketing Spend") plt.ylabel("Profit") # ## Spliting Dataset from sklearn.model_selection import train_test_split X, y = df[["R&D Spend", "Administration", "Marketing Spend"]], df["Profit"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=42 ) # ## Training Dataset using Linear Regression from sklearn.linear_model import LinearRegression clf = LinearRegression() clf.fit(X_train, y_train) # ## Predicting Dataset clf.predict([[78013.11, 121597.5500, 264346.0600]]) clf.predict(X_test) def start(): R_D = int(input("Enter Amout Spend in Research and development:")) Admin = int(input("Enter Administration expenses:")) Mar = int(input("Enter Marketing Spend Amount")) print("Estimated Profit:", clf.predict([[R_D, Admin, Mar]])) start()
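# ## Evaluating the Model (illustrative sketch) # The notebook above fits the regression and calls predict, but it never reports how well the fitted model matches the held-out data. A minimal evaluation sketch, assuming the clf, X_test and y_test objects defined in the cells above:
from sklearn.metrics import mean_absolute_error, r2_score

y_pred = clf.predict(X_test)

# An R^2 close to 1 and a small mean absolute error indicate a good fit on unseen data.
print("R^2 on the test set:", r2_score(y_test, y_pred))
print("Mean absolute error:", mean_absolute_error(y_test, y_pred))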
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326444.ipynb
1000-companies-profit
rupakroy
[{"Id": 129326444, "ScriptId": 38370784, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14110262, "CreationDate": "05/12/2023 19:59:51", "VersionNumber": 2.0, "Title": "Linear Regression Multiple Variables", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 70.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 61.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185269128, "KernelVersionId": 129326444, "SourceDatasetVersionId": 3105372}]
[{"Id": 3105372, "DatasetId": 1896237, "DatasourceVersionId": 3154274, "CreatorUserId": 3072182, "LicenseName": "CC0: Public Domain", "CreationDate": "01/28/2022 10:49:42", "VersionNumber": 1.0, "Title": "1000_companies_profit", "Slug": "1000-companies-profit", "Subtitle": "1000 Companies operating cost sample data list for building regression usecases", "Description": "The dataset includes sample data of 1000 startup companies operating cost and their profit. Well-formatted dataset for building ML regression pipelines.\n**Includes**\nR&D Spend float64\nAdministration float64\nMarketing Spend float64\nState object\nProfit float64", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1896237, "CreatorUserId": 3072182, "OwnerUserId": 3072182.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3105372.0, "CurrentDatasourceVersionId": 3154274.0, "ForumId": 1919554, "Type": 2, "CreationDate": "01/28/2022 10:49:42", "LastActivityDate": "01/28/2022", "TotalViews": 3171, "TotalDownloads": 826, "TotalVotes": 10, "TotalKernels": 10}]
[{"Id": 3072182, "UserName": "rupakroy", "DisplayName": "Rupak Roy/ Bob", "RegisterDate": "04/11/2019", "PerformanceTier": 2}]
[{"1000-companies-profit/1000_Companies.csv": {"column_names": "[\"R&D Spend\", \"Administration\", \"Marketing Spend\", \"State\", \"Profit\"]", "column_data_types": "{\"R&D Spend\": \"float64\", \"Administration\": \"float64\", \"Marketing Spend\": \"float64\", \"State\": \"object\", \"Profit\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 R&D Spend 1000 non-null float64\n 1 Administration 1000 non-null float64\n 2 Marketing Spend 1000 non-null float64\n 3 State 1000 non-null object \n 4 Profit 1000 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 39.2+ KB\n", "summary": "{\"R&D Spend\": {\"count\": 1000.0, \"mean\": 81668.9272, \"std\": 46537.56789148918, \"min\": 0.0, \"25%\": 43084.5, \"50%\": 79936.0, \"75%\": 124565.5, \"max\": 165349.2}, \"Administration\": {\"count\": 1000.0, \"mean\": 122963.8976117, \"std\": 12613.927534630991, \"min\": 51283.14, \"25%\": 116640.68485, \"50%\": 122421.61215, \"75%\": 129139.118, \"max\": 321652.14}, \"Marketing Spend\": {\"count\": 1000.0, \"mean\": 226205.05841882998, \"std\": 91578.39354210424, \"min\": 0.0, \"25%\": 150969.5846, \"50%\": 224517.88735, \"75%\": 308189.808525, \"max\": 471784.1}, \"Profit\": {\"count\": 1000.0, \"mean\": 119546.16465561, \"std\": 42888.63384847688, \"min\": 14681.4, \"25%\": 85943.1985425, \"50%\": 117641.4663, \"75%\": 155577.107425, \"max\": 476485.43}}", "examples": "{\"R&D Spend\":{\"0\":165349.2,\"1\":162597.7,\"2\":153441.51,\"3\":144372.41},\"Administration\":{\"0\":136897.8,\"1\":151377.59,\"2\":101145.55,\"3\":118671.85},\"Marketing Spend\":{\"0\":471784.1,\"1\":443898.53,\"2\":407934.54,\"3\":383199.62},\"State\":{\"0\":\"New York\",\"1\":\"California\",\"2\":\"Florida\",\"3\":\"New York\"},\"Profit\":{\"0\":192261.83,\"1\":191792.06,\"2\":191050.39,\"3\":182901.99}}"}}]
true
1
<start_data_description><data_path>1000-companies-profit/1000_Companies.csv:
<column_names>
['R&D Spend', 'Administration', 'Marketing Spend', 'State', 'Profit']
<column_types>
{'R&D Spend': 'float64', 'Administration': 'float64', 'Marketing Spend': 'float64', 'State': 'object', 'Profit': 'float64'}
<dataframe_Summary>
{'R&D Spend': {'count': 1000.0, 'mean': 81668.9272, 'std': 46537.56789148918, 'min': 0.0, '25%': 43084.5, '50%': 79936.0, '75%': 124565.5, 'max': 165349.2}, 'Administration': {'count': 1000.0, 'mean': 122963.8976117, 'std': 12613.927534630991, 'min': 51283.14, '25%': 116640.68485, '50%': 122421.61215, '75%': 129139.118, 'max': 321652.14}, 'Marketing Spend': {'count': 1000.0, 'mean': 226205.05841882998, 'std': 91578.39354210424, 'min': 0.0, '25%': 150969.5846, '50%': 224517.88735, '75%': 308189.808525, 'max': 471784.1}, 'Profit': {'count': 1000.0, 'mean': 119546.16465561, 'std': 42888.63384847688, 'min': 14681.4, '25%': 85943.1985425, '50%': 117641.4663, '75%': 155577.107425, 'max': 476485.43}}
<dataframe_info>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 5 columns):
 #   Column           Non-Null Count  Dtype
---  ------           --------------  -----
 0   R&D Spend        1000 non-null   float64
 1   Administration   1000 non-null   float64
 2   Marketing Spend  1000 non-null   float64
 3   State            1000 non-null   object
 4   Profit           1000 non-null   float64
dtypes: float64(4), object(1)
memory usage: 39.2+ KB
<some_examples>
{'R&D Spend': {'0': 165349.2, '1': 162597.7, '2': 153441.51, '3': 144372.41}, 'Administration': {'0': 136897.8, '1': 151377.59, '2': 101145.55, '3': 118671.85}, 'Marketing Spend': {'0': 471784.1, '1': 443898.53, '2': 407934.54, '3': 383199.62}, 'State': {'0': 'New York', '1': 'California', '2': 'Florida', '3': 'New York'}, 'Profit': {'0': 192261.83, '1': 191792.06, '2': 191050.39, '3': 182901.99}}
<end_description>
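The description above lists a categorical State column that the regression notebook ignores. A hypothetical sketch (not in the original notebook) of how State could be folded into the feature matrix via one-hot encoding; drop_first is an assumption used to avoid redundant dummy columns:

# One-hot encode the "State" column and refit the regression (sketch only).
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

df = pd.read_csv("/kaggle/input/1000-companies-profit/1000_Companies.csv")
X = pd.get_dummies(
    df[["R&D Spend", "Administration", "Marketing Spend", "State"]],
    columns=["State"],
    drop_first=True,  # keep k-1 dummies per category
)
y = df["Profit"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42
)
model = LinearRegression().fit(X_train, y_train)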
602
0
1,224
602
129326675
import matplotlib.pyplot as plt
import pandas as pd

# Sample flight-schedule data (Indonesian labels: departure/arrival dates and
# times, flight duration in hours)
data = pd.DataFrame(
    {
        "Tanggal Keberangkatan": ["2023-05-15", "2023-05-16", "2023-05-17", "2023-05-18", "2023-05-19"],
        "Waktu Keberangkatan": ["08:00", "12:00", "14:00", "18:00", "22:00"],
        "Tanggal Kedatangan": ["2023-05-15", "2023-05-16", "2023-05-17", "2023-05-18", "2023-05-19"],
        "Waktu Kedatangan": ["10:00", "14:00", "16:00", "20:00", "00:00"],
        "Durasi Penerbangan (jam)": [2, 2, 2, 2, 2],
    }
)

# Histogram of flight durations
plt.hist(data["Durasi Penerbangan (jam)"])
plt.xlabel("Durasi Penerbangan (jam)")
plt.ylabel("Frekuensi")
plt.title("Histogram Durasi Penerbangan")
plt.show()

# Scatter plot of departure time vs. arrival time
plt.scatter(data["Waktu Keberangkatan"], data["Waktu Kedatangan"])
plt.xlabel("Waktu Keberangkatan")
plt.ylabel("Waktu Kedatangan")
plt.title("Scatter Plot Waktu Keberangkatan vs Waktu Kedatangan")
plt.show()

# Box plot of flight durations
plt.boxplot(data["Durasi Penerbangan (jam)"])
plt.ylabel("Durasi Penerbangan (jam)")
plt.title("Box Plot Durasi Penerbangan")
plt.show()

# Bar chart of flight counts per departure date
plt.bar(data["Tanggal Keberangkatan"], [1, 2, 3, 2, 1])
plt.xlabel("Tanggal Keberangkatan")
plt.ylabel("Jumlah Penerbangan")
plt.title("Bar Chart Jumlah Penerbangan Berdasarkan Tanggal Keberangkatan")
plt.show()

import matplotlib.pyplot as plt
import pandas as pd

data = {
    "tanggal_keberangkatan": ["2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04", "2022-01-05"],
    "waktu_keberangkatan": [1200, 1300, 1400, 1500, 1600],
    "tanggal_kedatangan": ["2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04", "2022-01-05"],
    "waktu_kedatangan": [1430, 1530, 1630, 1730, 1830],
    "durasi_penerbangan": [2.5, 2.5, 2.5, 2.5, 2.5],
}
df = pd.DataFrame(data)
df.set_index("tanggal_keberangkatan", inplace=True)

# Line plot of flight duration per departure date
fig, ax = plt.subplots()
ax.plot(df.index, df["durasi_penerbangan"], label="Durasi Penerbangan")
ax.set_title("Grafik Durasi Penerbangan Maskapai")
ax.set_xlabel("Tanggal Keberangkatan")
ax.set_ylabel("Durasi Penerbangan (jam)")
plt.xticks(rotation=45)
ax.legend()
plt.show()

import matplotlib.pyplot as plt
import pandas as pd

data = {
    "tanggal_keberangkatan": ["2023-05-14", "2023-05-15", "2023-05-16", "2023-05-17", "2023-05-18"],
    "waktu_keberangkatan": ["08:00", "10:00", "12:00", "14:00", "16:00"],
    "tanggal_kedatangan": ["2023-05-14", "2023-05-15", "2023-05-16", "2023-05-17", "2023-05-18"],
    "waktu_kedatangan": ["09:30", "11:30", "13:30", "15:30", "17:30"],
    "durasi_penerbangan": [90, 90, 90, 90, 90],
}
df = pd.DataFrame(data)

# Scatter plot of flight duration (minutes) per departure date
fig, ax = plt.subplots()
ax.scatter(df["tanggal_keberangkatan"], df["durasi_penerbangan"])
ax.set_title("Durasi Penerbangan Berdasarkan Tanggal Keberangkatan")
ax.set_xlabel("Tanggal Keberangkatan")
ax.set_ylabel("Durasi Penerbangan (menit)")
plt.show()

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.DataFrame(
    {
        "tanggal_keberangkatan": ["2021-05-13", "2021-05-13", "2021-05-14", "2021-05-14", "2021-05-15", "2021-05-15"],
        "waktu_keberangkatan": ["08:00:00", "12:00:00", "08:00:00", "12:00:00", "08:00:00", "12:00:00"],
        "tanggal_kedatangan": ["2021-05-13", "2021-05-13", "2021-05-14", "2021-05-14", "2021-05-15", "2021-05-15"],
        "waktu_kedatangan": ["10:30:00", "14:30:00", "10:30:00", "14:30:00", "10:30:00", "14:30:00"],
        "durasi_penerbangan": [2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
    }
)
data["tanggal_keberangkatan"] = pd.to_datetime(data["tanggal_keberangkatan"])
data["tanggal_kedatangan"] = pd.to_datetime(data["tanggal_kedatangan"])
data["day_of_week"] = data["tanggal_keberangkatan"].dt.dayofweek

# Heatmap of flight duration by day of week and departure time
pivot = pd.pivot_table(
    data,
    values="durasi_penerbangan",
    index="day_of_week",
    columns="waktu_keberangkatan",
)
sns.heatmap(pivot, cmap="YlGnBu", annot=True, fmt=".1f")
plt.title("Durasi Penerbangan per Hari dalam Seminggu dan Waktu Keberangkatan")
plt.xlabel("Waktu Keberangkatan")
plt.ylabel("Hari dalam Seminggu")
plt.show()

import matplotlib.pyplot as plt

tanggal_keberangkatan = ["2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04", "2022-01-05"]
jumlah_penerbangan = [80, 70, 90, 85, 75]

# Bar chart of flight counts per departure date
fig, ax = plt.subplots()
ax.bar(tanggal_keberangkatan, jumlah_penerbangan)
ax.set_xlabel("Tanggal Keberangkatan")
ax.set_ylabel("Jumlah Penerbangan")
ax.set_title("Grafik Jumlah Penerbangan Maskapai XYZ")
plt.show()

import folium

# Origin/destination pairs for a few domestic routes, plotted on a folium map
data_penerbangan = [
    {"keberangkatan": "Jakarta", "tujuan": "Surabaya"},
    {"keberangkatan": "Jakarta", "tujuan": "Bali"},
    {"keberangkatan": "Surabaya", "tujuan": "Bali"},
    {"keberangkatan": "Surabaya", "tujuan": "Jakarta"},
    {"keberangkatan": "Bali", "tujuan": "Jakarta"},
    {"keberangkatan": "Bali", "tujuan": "Surabaya"},
]
peta = folium.Map(location=[-2.548926, 118.014863], zoom_start=5)
for p in data_penerbangan:
    folium.Marker(
        location=[-6.1753924, 106.8271528],
        popup=p["keberangkatan"],
        icon=folium.Icon(color="red"),
    ).add_to(peta)
for p in data_penerbangan:
    folium.Marker(
        location=[-8.4095181, 115.188916],
        popup=p["tujuan"],
        icon=folium.Icon(color="green"),
    ).add_to(peta)
peta
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326675.ipynb
null
null
[{"Id": 129326675, "ScriptId": 38449348, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14545408, "CreationDate": "05/12/2023 20:03:15", "VersionNumber": 1.0, "Title": "notebook8ef928e75b", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 147.0, "LinesInsertedFromPrevious": 147.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,540
0
2,540
2,540
129326643
from datetime import datetime, timedelta

# Flight duration from separate date and time strings
departure_date = "2022-05-15"
departure_time = "07:30"
arrival_date = "2022-05-16"
arrival_time = "10:45"
departure_datetime = datetime.strptime(
    departure_date + " " + departure_time, "%Y-%m-%d %H:%M"
)
arrival_datetime = datetime.strptime(
    arrival_date + " " + arrival_time, "%Y-%m-%d %H:%M"
)
flight_duration = arrival_datetime - departure_datetime
print("Durasi penerbangan:", flight_duration)
print("Durasi penerbangan (jam):", flight_duration.total_seconds() / 3600)

from datetime import datetime

# Split combined datetime strings into date and time components
departure_datetime = "2022-05-15 07:30"
arrival_datetime = "2022-05-16 10:45"
departure_date, departure_time = departure_datetime.split(" ")
departure_year, departure_month, departure_day = departure_date.split("-")
departure_hour, departure_minute = departure_time.split(":")
arrival_date, arrival_time = arrival_datetime.split(" ")
arrival_year, arrival_month, arrival_day = arrival_date.split("-")
arrival_hour, arrival_minute = arrival_time.split(":")
print("Tanggal keberangkatan:", departure_date)
print("Waktu keberangkatan:", departure_time)
print("Tanggal kedatangan:", arrival_date)
print("Waktu kedatangan:", arrival_time)

import pytz
from datetime import datetime

# Convert a naive Asia/Jakarta time to US/Pacific using pytz
waktu_asli = datetime(2022, 5, 15, 10, 30, 0)
zona_waktu_asli = pytz.timezone("Asia/Jakarta")
zona_waktu_tujuan = pytz.timezone("US/Pacific")
waktu_tujuan = zona_waktu_asli.localize(waktu_asli).astimezone(zona_waktu_tujuan)
print("Waktu asli:", waktu_asli)
print("Zona waktu asli:", zona_waktu_asli)
print("Waktu konversi:", waktu_tujuan)
print("Zona waktu konversi:", zona_waktu_tujuan)

from datetime import datetime, timedelta

# Duration of an overnight flight
tgl_keberangkatan = "2023-05-15"
waktu_keberangkatan = "07:30"
tgl_kedatangan = "2023-05-16"
waktu_kedatangan = "01:45"
keberangkatan = datetime.strptime(
    tgl_keberangkatan + " " + waktu_keberangkatan, "%Y-%m-%d %H:%M"
)
kedatangan = datetime.strptime(
    tgl_kedatangan + " " + waktu_kedatangan, "%Y-%m-%d %H:%M"
)
durasi_penerbangan = kedatangan - keberangkatan
print("Durasi penerbangan: ", durasi_penerbangan)

import datetime

# Extract the date part from a full timestamp
waktu_jam = "2022-05-14 13:30:00"
waktu_jam_dt = datetime.datetime.strptime(waktu_jam, "%Y-%m-%d %H:%M:%S")
waktu_hari = waktu_jam_dt.date()
print("Waktu jam:", waktu_jam)
print("Waktu hari:", waktu_hari)

from datetime import datetime

# Parse a departure date + time string into a datetime object
tanggal_keberangkatan = "2022-05-20"
waktu_keberangkatan = "07:30:00"
tanggalkan_keberangkatan_str = tanggal_keberangkatan + " " + waktu_keberangkatan
tanggalkan_keberangkatan_dt = datetime.strptime(
    tanggalkan_keberangkatan_str, "%Y-%m-%d %H:%M:%S"
)
print(tanggalkan_keberangkatan_dt)
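The timezone conversion above relies on pytz. An alternative sketch (not in the original notebook) using the standard-library zoneinfo module, available in Python 3.9+:

# Same Asia/Jakarta -> US/Pacific conversion with zoneinfo (sketch only).
from datetime import datetime
from zoneinfo import ZoneInfo

waktu_asli = datetime(2022, 5, 15, 10, 30, 0, tzinfo=ZoneInfo("Asia/Jakarta"))
waktu_tujuan = waktu_asli.astimezone(ZoneInfo("US/Pacific"))
print("Waktu asli:", waktu_asli)
print("Waktu konversi:", waktu_tujuan)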
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326643.ipynb
null
null
[{"Id": 129326643, "ScriptId": 38448290, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14545408, "CreationDate": "05/12/2023 20:02:48", "VersionNumber": 1.0, "Title": "notebook3068a9f5db", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,080
0
1,080
1,080
129326661
import matplotlib.pyplot as plt

# Box plots of the numeric variables (price, sales volume, profit margin,
# customer rating)
harga = [10, 15, 20, 25, 30, 35, 40, 45, 50]
jumlah_penjualan = [100, 150, 200, 250, 300, 350, 400, 450, 500]
margin_keuntungan = [5, 10, 15, 20, 25, 30, 35, 40, 45]
rating_pelanggan = [2, 3, 3, 4, 4, 4, 5, 5, 5]
data = [harga, jumlah_penjualan, margin_keuntungan, rating_pelanggan]
plt.boxplot(data)
plt.xticks(
    [1, 2, 3, 4], ["Harga", "Jumlah Penjualan", "Margin Keuntungan", "Rating Pelanggan"]
)
plt.title("Box Plot Variabel Numerik")
plt.show()

import matplotlib.pyplot as plt

# Scatter plots of each variable against price
harga = [1000, 2000, 1500, 3000, 2500]
jumlah_penjualan = [50, 100, 70, 150, 120]
margin_keuntungan = [10, 15, 12, 20, 18]
rating_pelanggan = [3.5, 4.0, 3.7, 4.2, 4.1]
plt.scatter(harga, jumlah_penjualan, color="red", label="Jumlah Penjualan")
plt.scatter(harga, margin_keuntungan, color="blue", label="Margin Keuntungan")
plt.scatter(harga, rating_pelanggan, color="green", label="Rating Pelanggan")
plt.xlabel("Harga")
plt.legend()
plt.show()

import matplotlib.pyplot as plt
import numpy as np

# Annotated heatmap of random values for four products and four variables
data = np.random.rand(4, 4)
x_labels = ["Harga", "Jumlah Penjualan", "Margin Keuntungan", "Rating Pelanggan"]
y_labels = ["Produk 1", "Produk 2", "Produk 3", "Produk 4"]
fig, ax = plt.subplots()
im = ax.imshow(data, cmap="Blues")
ax.set_xticks(np.arange(len(x_labels)))
ax.set_yticks(np.arange(len(y_labels)))
ax.set_xticklabels(x_labels)
ax.set_yticklabels(y_labels)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(y_labels)):
    for j in range(len(x_labels)):
        text = ax.text(
            j, i, f"{data[i,j]:.2f}", ha="center", va="center", color="black"
        )
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_title("Heatmap")
plt.show()

import matplotlib.pyplot as plt

# Line plots of sales, margin, and rating against price
harga = [10, 20, 30, 40, 50]
penjualan = [100, 200, 150, 250, 300]
margin = [20, 25, 30, 35, 40]
rating = [4.2, 4.5, 4.0, 4.3, 4.7]
plt.plot(harga, penjualan, marker="o")
plt.plot(harga, margin, marker="o")
plt.plot(harga, rating, marker="o")
plt.xlabel("Harga")
plt.ylabel("Jumlah")
plt.title("Grafik Line Plot")
plt.show()

import seaborn as sns
import pandas as pd

# Correlation heatmap of the numeric variables
data = {
    "Harga": [500000, 600000, 750000, 900000, 1000000],
    "Jumlah Penjualan": [10, 12, 15, 18, 20],
    "Margin Keuntungan": [200000, 250000, 300000, 350000, 400000],
    "Rating Pelanggan": [3.5, 4.0, 4.5, 4.7, 5.0],
}
df = pd.DataFrame(data)
corr = df.corr()
sns.heatmap(corr, annot=True, cmap="coolwarm")
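The final cell above plots the full correlation matrix. An optional variation (not in the original notebook): mask the upper triangle so each pairwise correlation appears only once.

# Masked correlation heatmap (sketch only), reusing the same small dataset.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

data = {
    "Harga": [500000, 600000, 750000, 900000, 1000000],
    "Jumlah Penjualan": [10, 12, 15, 18, 20],
    "Margin Keuntungan": [200000, 250000, 300000, 350000, 400000],
    "Rating Pelanggan": [3.5, 4.0, 4.5, 4.7, 5.0],
}
corr = pd.DataFrame(data).corr()
mask = np.triu(np.ones_like(corr, dtype=bool))  # hide the redundant upper triangle
sns.heatmap(corr, mask=mask, annot=True, cmap="coolwarm")
plt.show()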
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/326/129326661.ipynb
null
null
[{"Id": 129326661, "ScriptId": 38448962, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14545408, "CreationDate": "05/12/2023 20:03:04", "VersionNumber": 1.0, "Title": "notebook1305764811", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 92.0, "LinesInsertedFromPrevious": 92.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,177
0
1,177
1,177