{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \\n\")\nhtmlFile.close()\nprintLog(\"Done.\")\nExplanation: To facilitate the assessment of the effectiveness of the different measures and metrics, the next cell creates an HTML overview document with the first found documents.\nSample results are available here.\nEnd of explanation\nqbeIndexLocalFeat=17#qbeIndex#17 #17=Welt\nimg1=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)\nimg2=imread(jpegFilePaths[1301],as_gray=True)\nimg3=imread(jpegFilePaths[1671],as_gray=True)\n#Creates two subplots and unpacks the output array immediately\nf, (ax1, ax2,ax3) = plt.subplots(1, 3, sharex='all', sharey='all')\nax1.axis('off')\nax2.axis('off')\nax3.axis('off')\nax1.set_title(\"Query #%i\"%qbeIndexLocalFeat)\nax1.imshow(img1)\nax2.set_title(\"Index #1301\")\nax2.imshow(img2)\nax3.set_title(\"Index #1671\")\nax3.imshow(img3)\nExplanation: A Local Feature - ORB\nThe local ORB (Oriented FAST and Rotated BRIEF) feature takes interesting regions of an image into account - the so-called keypoints. In cotrast to the presented approaches which only consider the whole image at a time and which are therefore called global features, local feature extractors search for keypoints and try to match them with the ones found in another image. \nHypothetically speaking, such features should be helpful to discover similar details in different images no matter how they differ in scale or rotation. Hence, ORB is considered relatively scale and rotation invariant.\nIn this section, we will investigate wheter ORB can be used to find pages in the Orbis Pictus decribing the concept of the \"world\" which are present in three editions of the book as displayed below.\nEnd of explanation\n# extract features\ndescriptor_extractor = ORB(n_keypoints=200)\ndescriptor_extractor.detect_and_extract(img1)\nkeypoints1 = descriptor_extractor.keypoints\ndescriptors1 = descriptor_extractor.descriptors\ndescriptor_extractor.detect_and_extract(img2)\nkeypoints2 = descriptor_extractor.keypoints\ndescriptors2 = descriptor_extractor.descriptors\n# match features\nmatches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)\n# visualize the results\nfig, ax = plt.subplots(nrows=1, ncols=1)\nplt.gray()\nplot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)\nax.axis('off')\nax.set_title(\"Image 1 vs. Image 2\")\nExplanation: To give an example, we will extract ORB features from the first two images and match them. The discovered matches will be illustrated below.\nEnd of explanation\nprintLog(\"Calculating ORB QBE scenarios...\")\n#qbeIndexLocalFeat\n# prepare QBE image\ndescriptor_extractor = ORB(n_keypoints=200)\n# prepare QBE image\nqbeImage=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)\ndescriptor_extractor.detect_and_extract(qbeImage)\nqbeKeypoints = descriptor_extractor.keypoints\nqbeDescriptors = descriptor_extractor.descriptors\norbDescriptors=[]\norbMatches=[]\n# match QBE image against the corpus\ndataDict={\"index\":[],\"matches_orb\":[]}\nfor i,jpeg in enumerate(jpegFilePaths):\n dataDict[\"index\"].append(i)\n compImage=imread(jpeg,as_gray=True)\n descriptor_extractor.detect_and_extract(compImage)\n keypoints = descriptor_extractor.keypoints\n descriptors = descriptor_extractor.descriptors\n \n orbDescriptors.append(descriptors)\n matches = match_descriptors(qbeDescriptors, descriptors, cross_check=True)#,max_distance=0.5)\n orbMatches.append(matches)\n # naive approach: count the number of matched descriptors\n dataDict[\"matches_orb\"].append(matches.shape[0])\n \n if i%100==0:\n printLog(\"Processed %i documents of %i.\"%(i,len(jpegFilePaths)))\n \ndf=pd.DataFrame(dataDict)\nprintLog(\"Done.\")\ndf2=df.sort_values(by=['matches_orb'],ascending=False).head(20)\ndf2.describe()\nExplanation: ATTENTION ! Depending on your computer setup, the next cell will take some time to finish. See the log below to get an estimation. The experiment has been run with with MacBook Pro (13-inch, 2018, 2,7 GHz Intel Core i7, 16 GB, and macOS Mojave).\nIn this naive approach, we will simply count the number of matches between the query image and each image in the corpus and use this value as a similarity score.\nEnd of explanation\nprintLog(\"Calculating Hamming distances for ORB features and calculating average distance...\")\naverageDistancePerImage=[]\nfor i,matches in enumerate(orbMatches):\n # matches qbe\n # matches[:, 0]\n # matches document\n # matches[:, 1]\n qbeMatchIndices=matches[:, 0]\n queryMatchIndices=matches[:, 1]\n sumDistances=0.0\n noMatches=len(qbeMatchIndices)\n for j,qbeMatchIndex in enumerate(qbeMatchIndices):\n sumDistances+=hamming(qbeDescriptors[qbeMatchIndex],orbDescriptors[i][queryMatchIndices[j]])\n avgDistance=sumDistances/noMatches\n averageDistancePerImage.append((avgDistance,i))\n if i%100==0:\n printLog(\"Processed %i documents of %i.\"%(i,len(orbMatches)))\n \naverageDistancePerImage.sort(key=lambda tup: tup[0])\nprintLog(\"Done.\\n\")\n# create the report files\nmeasures=[\"matches_orb\"]\nranks=dict()\nprintLog(\"Creating QBE ORB report files...\")\nhtmlFile=open(outputDir+\"_orb.html\", \"w\")\nprintLog(\"HTML output will be saved to: %s\"%outputDir+\"_orb.html\")\nhtmlFile.write(\"\\n\")\nhtmlFile.write(\"\\n\")\n#htmlFile.write(\"\\n\")\nhtmlFile.write(\"

orb comparison.

\")\nhtmlFile.write(\"\\n\")\nfor measureName in measures:\n typeOfMeasure=\"similarity\"\n htmlFile.write(\"\\n\")\n# the non-naive approach using the average distance\nhtmlFile.write(\"\\n\")\n#eof\n# close the HTML file\nhtmlFile.write(\"
\\n\")\n htmlFile.write(\"

\"+measureName+\"

\\n\")\n htmlFile.write(\"

\"+typeOfMeasure+\"

\\n\")\n ranks[measureName]=df2.index.tolist()\n jpegFilePathsReport=[]\n # image directory must be relative to the directory of the html files\n imgBaseDir=\"./extracted_images/\"\n for row in df2.itertuples(index=False):\n i=row.index\n score=getattr(row, measureName)\n # create JPEG copies if not available already\n tiffImage=imgBaseDir+ppnList[i]+\"/\"+nameList[i]\n jpgPath=tiffImage.replace(\".tif\",\".jpg\")\n if not os.path.exists(outputDir+jpgPath):\n image = Image.open(outputDir+tiffImage)\n image.thumbnail((512,512))\n image.save(outputDir+jpgPath)\n image.close()\n os.remove(outputDir+tiffImage)\n jpegFilePathsReport.append(outputDir+jpgPath)\n if i==qbeIndex:\n htmlFile.write(\"\"+str(i)+\"\\n\")\n else:\n htmlFile.write(\"\"+str(i)+\"\\n\")\n #htmlFile.write(\"

\"+str(score)+\"

\") \n htmlFile.write(\"

 

\\n\")\n htmlFile.write(\"
\\n\")\nhtmlFile.write(\"

dist_avg_orb

\\n\")\nhtmlFile.write(\"

\"+typeOfMeasure+\"

\\n\")\nfor (dist,index) in averageDistancePerImage[:20]:\n typeOfMeasure=\"similarity\"\n jpegFilePathsReport=[]\n # image directory must be relative to the directory of the html files\n imgBaseDir=\"./extracted_images/\"\n \n i=index\n score=dist\n # create JPEG copies if not available already\n tiffImage=imgBaseDir+ppnList[i]+\"/\"+nameList[i]\n jpgPath=tiffImage.replace(\".tif\",\".jpg\")\n if not os.path.exists(outputDir+jpgPath):\n image = Image.open(outputDir+tiffImage)\n image.thumbnail((512,512))\n image.save(outputDir+jpgPath)\n image.close()\n os.remove(outputDir+tiffImage)\n jpegFilePathsReport.append(outputDir+jpgPath)\n if i==qbeIndex:\n htmlFile.write(\"\"+str(i)+\"\\n\")\n else:\n htmlFile.write(\"\"+str(i)+\"\\n\")\n htmlFile.write(\"

\"+str(score)+\"

\") \n htmlFile.write(\"

 

\\n\")\n \nhtmlFile.write(\"
\\n\")\nhtmlFile.write(\"\\n\")\nhtmlFile.close()\nprintLog(\"Done.\")\nExplanation: In a little more sophisticated approach, we will compute the average distance for each query-image pair for all matches. This value yields another similarity score.\nEventually, a HTML report file is created to compare the results of both approaches.\nSample results are available here.\nEnd of explanation\nprintLog(\"Clustering...\")\nX=np.array(combinedHistograms)\nnumberOfClusters=20\nkmeans = MiniBatchKMeans(n_clusters=numberOfClusters, random_state = 0, batch_size = 6)\nkmeans=kmeans.fit(X)\nprintLog(\"Done.\")\nprintLog(\"Creating report files...\")\nhtmlFiles=[]\njpegFilePaths=[]\nfor i in range(0,numberOfClusters):\n htmlFile=open(outputDir+str(i)+\".html\", \"w\")\n htmlFile.write(\"\\n\\n\")\n #htmlFile.write(\"

Cluster \"+str(i)+\"

\\n\")\n htmlFile.write(\"\") # cluster center histogram will created below\n htmlFiles.append(htmlFile)\n# image directory must be relative to the directory of the html files\nimgBaseDir=\"./extracted_images/\"\nfor i, label in enumerate(kmeans.labels_):\n # create JPEG copies if not available already\n tiffImage=imgBaseDir+ppnList[i]+\"/\"+nameList[i]\n jpgPath=tiffImage.replace(\".tif\",\".jpg\")\n if not os.path.exists(outputDir+jpgPath):\n image = Image.open(outputDir+tiffImage)\n image.thumbnail((512,512))\n image.save(outputDir+jpgPath)\n image.close()\n os.remove(outputDir+tiffImage)\n jpegFilePaths.append(outputDir+jpgPath)\n \n htmlFiles[label].write(\"\"+str(len(jpegFilePaths)-1)+\"\\n\")\n# close the HTML files\nfor h in htmlFiles:\n h.write(\"\\n\")\n h.close()\n# create the summarization main HTML page\nhtmlFile = open(outputDir+\"_main.html\", \"w\")\nprintLog(\"HTML output will be saved to: %s\"%outputDir+\"_main.html\")\nhtmlFile.write(\"\\n\")\nhtmlFile.write(\"

cluster results.

\\n\")\nfor i in range(0, numberOfClusters):\n htmlFile.write(\"\")\nhtmlFile.write(\"\\n\")\nhtmlFile.close()\nprintLog(\"Done.\")\n# save the cluster center histograms as images to assist the visualization\nprintLog(\"Rendering %i cluster center histograms...\"%len(kmeans.cluster_centers_))\nfor j, histogram in enumerate(kmeans.cluster_centers_):\n plt.figure(0)\n # clean previous plots\n plt.clf()\n plt.title(\"Cluster %i\"%j)\n #red\n for i in range(0, 256):\n plt.bar(i, histogram[i],color='red', alpha=0.3)\n # blue\n for i in range(256, 512):\n plt.bar(i-256, histogram[i], color='blue', alpha=0.3)\n # green\n for i in range(512, 768):\n plt.bar(i-512, histogram[i], color='green', alpha=0.3)\n #debug\n #plt.show()\n plt.savefig(outputDir+str(j)+\".png\")\nprintLog(\"Done.\")\nExplanation: Histogram-based Clustering\nSample results are available here.\nEnd of explanation"}}},{"rowIdx":2107,"cells":{"Unnamed: 0":{"kind":"number","value":2107,"string":"2,107"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Purpose\nThe purpose of this notebook is to work out the data structure for saving the computed results for a single session. Here we are using the xarray package to structure the data, because\nStep1: Go through the steps to get the ripple triggered connectivity\nStep2: Make an xarray dataset for coherence and pairwise spectral granger\nStep3: Show that it is easy to select two individual tetrodes and plot a subset of their frequency for coherence.\nStep4: Show the same thing for spectral granger.\nStep5: Now show that we can plot all tetrodes pairs in a dataset\nStep6: It is also easy to select a subset of tetrode pairs (in this case all CA1-PFC tetrode pairs).\nStep7: xarray also makes it easy to compare the difference of a connectivity measure from its baseline (in this case, the baseline is the first time bin)\nStep8: It is also easy to average over the tetrode pairs\nStep9: And also average over the difference\nStep10: Test saving as netcdf file\nStep11: Show that we can open the saved dataset and recover the data\nStep12: Make data structure for group delay\nStep13: Make data structure for canonical coherence\nStep14: Now after adding this code into the code base, test if we can compute, save, and load"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport xarray as xr\nfrom src.data_processing import (get_LFP_dataframe, make_tetrode_dataframe,\n make_tetrode_pair_info, reshape_to_segments)\nfrom src.parameters import (ANIMALS, SAMPLING_FREQUENCY,\n MULTITAPER_PARAMETERS, FREQUENCY_BANDS,\n RIPPLE_COVARIATES, ALPHA)\nfrom src.analysis import (decode_ripple_clusterless,\n detect_epoch_ripples, is_overlap,\n _subtract_event_related_potential)\nExplanation: Purpose\nThe purpose of this notebook is to work out the data structure for saving the computed results for a single session. Here we are using the xarray package to structure the data, because:\nIt is built to handle large multi-dimensional data (orginally for earth sciences data).\nIt allows you to call dimensions by name (time, frequency, etc).\nThe plotting functions are convenient for multi-dimensional data (it has convenient heatmap plotting).\nIt can output to HDF5 (via the netcdf format, a geosciences data format), which is built for handling large data in a descriptive (i.e. can label units, add information about how data was constructed, etc.).\nLazily loads data so large datasets that are too big for memory can be handled (via dask).\nPreviously, I was using the pandas package in python and this wasn't handling the loading and combining of time-frequency data. In particular, the size of the data was problematic even on the cluster and this was frustrating to debug. pandas now recommends the usage of xarray for multi-dimesional data.\nEnd of explanation\nepoch_key = ('HPa', 6, 2)\nripple_times = detect_epoch_ripples(\n epoch_key, ANIMALS, sampling_frequency=SAMPLING_FREQUENCY)\ntetrode_info = make_tetrode_dataframe(ANIMALS)[epoch_key]\ntetrode_info = tetrode_info[\n ~tetrode_info.descrip.str.endswith('Ref').fillna(False)]\ntetrode_pair_info = make_tetrode_pair_info(tetrode_info)\nlfps = {tetrode_key: get_LFP_dataframe(tetrode_key, ANIMALS)\n for tetrode_key in tetrode_info.index}\nfrom copy import deepcopy\nfrom functools import partial, wraps\nmultitaper_parameter_name = '4Hz_Resolution'\nmultitaper_params = MULTITAPER_PARAMETERS[multitaper_parameter_name]\nnum_lfps = len(lfps)\nnum_pairs = int(num_lfps * (num_lfps - 1) / 2)\nparams = deepcopy(multitaper_params)\nwindow_of_interest = params.pop('window_of_interest')\nreshape_to_trials = partial(\n reshape_to_segments,\n sampling_frequency=params['sampling_frequency'],\n window_offset=window_of_interest, concat_axis=1)\nripple_locked_lfps = pd.Panel({\n lfp_name: _subtract_event_related_potential(\n reshape_to_trials(lfps[lfp_name], ripple_times))\n for lfp_name in lfps})\nfrom src.spectral.connectivity import Connectivity\nfrom src.spectral.transforms import Multitaper\nm = Multitaper(\n np.rollaxis(ripple_locked_lfps.values, 0, 3),\n **params,\n start_time=ripple_locked_lfps.major_axis.min())\nc = Connectivity(\n fourier_coefficients=m.fft(),\n frequencies=m.frequencies,\n time=m.time)\nExplanation: Go through the steps to get the ripple triggered connectivity\nEnd of explanation\nn_lfps = len(lfps)\nds = xr.Dataset(\n {'coherence_magnitude': (['time', 'frequency', 'tetrode1', 'tetrode2'], c.coherence_magnitude()),\n 'pairwise_spectral_granger_prediction': (['time', 'frequency', 'tetrode1', 'tetrode2'], c.pairwise_spectral_granger_prediction())},\n coords={'time': c.time + np.diff(c.time)[0] / 2, \n 'frequency': c.frequencies + np.diff(c.frequencies)[0] / 2,\n 'tetrode1': tetrode_info.tetrode_id.values,\n 'tetrode2': tetrode_info.tetrode_id.values,\n 'brain_area1': ('tetrode1', tetrode_info.area.tolist()),\n 'brain_area2': ('tetrode2', tetrode_info.area.tolist()),\n 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]),\n }\n)\nds\nExplanation: Make an xarray dataset for coherence and pairwise spectral granger\nEnd of explanation\nds.sel(\n tetrode1='HPa621',\n tetrode2='HPa624',\n frequency=slice(0, 30)).coherence_magnitude.plot(x='time', y='frequency');\nExplanation: Show that it is easy to select two individual tetrodes and plot a subset of their frequency for coherence.\nEnd of explanation\nds.sel(\n tetrode1='HPa621',\n tetrode2='HPa6220',\n frequency=slice(0, 30)\n).pairwise_spectral_granger_prediction.plot(x='time', y='frequency');\nExplanation: Show the same thing for spectral granger.\nEnd of explanation\nds['pairwise_spectral_granger_prediction'].sel(\n frequency=slice(0, 30)).plot(x='time', y='frequency', col='tetrode1', row='tetrode2', robust=True);\nds['coherence_magnitude'].sel(\n frequency=slice(0, 30)).plot(x='time', y='frequency', col='tetrode1', row='tetrode2');\nExplanation: Now show that we can plot all tetrodes pairs in a dataset\nEnd of explanation\n(ds.sel(\n tetrode1=ds.tetrode1[ds.brain_area1=='CA1'],\n tetrode2=ds.tetrode2[ds.brain_area2=='PFC'],\n frequency=slice(0, 30))\n .coherence_magnitude\n .plot(x='time', y='frequency', col='tetrode1', row='tetrode2'));\nExplanation: It is also easy to select a subset of tetrode pairs (in this case all CA1-PFC tetrode pairs).\nEnd of explanation\n((ds - ds.isel(time=0)).sel(\n tetrode1=ds.tetrode1[ds.brain_area1=='CA1'],\n tetrode2=ds.tetrode2[ds.brain_area2=='PFC'],\n frequency=slice(0, 30))\n .coherence_magnitude\n .plot(x='time', y='frequency', col='tetrode1', row='tetrode2'));\nExplanation: xarray also makes it easy to compare the difference of a connectivity measure from its baseline (in this case, the baseline is the first time bin)\nEnd of explanation\n(ds.sel(\n tetrode1=ds.tetrode1[ds.brain_area1=='CA1'],\n tetrode2=ds.tetrode2[ds.brain_area2=='PFC'],\n frequency=slice(0, 30))\n .coherence_magnitude.mean(['tetrode1', 'tetrode2'])\n .plot(x='time', y='frequency'));\nExplanation: It is also easy to average over the tetrode pairs\nEnd of explanation\n((ds - ds.isel(time=0)).sel(\n tetrode1=ds.tetrode1[ds.brain_area1=='CA1'],\n tetrode2=ds.tetrode2[ds.brain_area2=='PFC'],\n frequency=slice(0, 30))\n .coherence_magnitude.mean(['tetrode1', 'tetrode2'])\n .plot(x='time', y='frequency'));\nExplanation: And also average over the difference\nEnd of explanation\nimport os\npath = '{0}_{1:02d}_{2:02d}.nc'.format(*epoch_key)\ngroup = '{0}/'.format(multitaper_parameter_name)\nwrite_mode = 'a' if os.path.isfile(path) else 'w'\nds.to_netcdf(path=path, group=group, mode=write_mode)\nExplanation: Test saving as netcdf file\nEnd of explanation\nwith xr.open_dataset(path, group=group) as da:\n da.load()\n print(da)\nExplanation: Show that we can open the saved dataset and recover the data\nEnd of explanation\nn_bands = len(FREQUENCY_BANDS)\ndelay, slope, r_value = (np.zeros((c.time.size, n_bands, m.n_signals, m.n_signals)),) * 3\nfor band_ind, frequency_band in enumerate(FREQUENCY_BANDS):\n (delay[:, band_ind, ...],\n slope[:, band_ind, ...],\n r_value[:, band_ind, ...]) = c.group_delay(\n FREQUENCY_BANDS[frequency_band], frequency_resolution=m.frequency_resolution)\n \ncoordinate_names = ['time', 'frequency_band', 'tetrode1', 'tetrode2']\nds = xr.Dataset(\n {'delay': (coordinate_names, delay),\n 'slope': (coordinate_names, slope),\n 'r_value': (coordinate_names, r_value)},\n coords={'time': c.time + np.diff(c.time)[0] / 2, \n 'frequency_band': list(FREQUENCY_BANDS.keys()),\n 'tetrode1': tetrode_info.tetrode_id.values,\n 'tetrode2': tetrode_info.tetrode_id.values,\n 'brain_area1': ('tetrode1', tetrode_info.area.tolist()),\n 'brain_area2': ('tetrode2', tetrode_info.area.tolist()),\n 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]),\n }\n)\nds['delay'].sel(frequency_band='beta', tetrode1='HPa621', tetrode2='HPa622').plot();\nExplanation: Make data structure for group delay\nEnd of explanation\ncanonical_coherence, area_labels = c.canonical_coherence(tetrode_info.area.tolist())\ndimension_names = ['time', 'frequency', 'brain_area1', 'brain_area2']\ndata_vars = {'canonical_coherence': (dimension_names, canonical_coherence)}\ncoordinates = {\n 'time': c.time + np.diff(c.time)[0] / 2,\n 'frequency': c.frequencies + np.diff(c.frequencies)[0] / 2,\n 'brain_area1': area_labels,\n 'brain_area2': area_labels,\n 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]),\n}\nds = xr.Dataset(data_vars, coords=coordinates)\nds.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 30)).canonical_coherence.plot(x='time', y='frequency')\nExplanation: Make data structure for canonical coherence\nEnd of explanation\nfrom src.analysis import ripple_triggered_connectivity\nfor parameters_name, parameters in MULTITAPER_PARAMETERS.items():\n ripple_triggered_connectivity(\n lfps, epoch_key, tetrode_info, ripple_times, parameters,\n FREQUENCY_BANDS,\n multitaper_parameter_name=parameters_name,\n group_name='all_ripples')\nwith xr.open_dataset(path, group='2Hz_Resolution/all_ripples/canonical_coherence') as da:\n da.load()\n print(da)\n da.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 30)).canonical_coherence.plot(x='time', y='frequency')\nwith xr.open_dataset(path, group='10Hz_Resolution/all_ripples/canonical_coherence') as da:\n da.load()\n print(da)\n da.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 30)).canonical_coherence.plot(x='time', y='frequency')\nExplanation: Now after adding this code into the code base, test if we can compute, save, and load\nEnd of explanation"}}},{"rowIdx":2108,"cells":{"Unnamed: 0":{"kind":"number","value":2108,"string":"2,108"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Chapter 7 – Ensemble Learning and Random Forests\nThis notebook contains all the sample code and solutions to the exercises in chapter 7.\nSetup\nFirst, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures\nStep1: Voting classifiers\nStep2: Bagging ensembles\nStep3: Random Forests\nStep4: Out-of-Bag evaluation\nStep5: Feature importance\nStep6: AdaBoost\nStep7: Gradient Boosting\nStep8: Gradient Boosting with Early stopping"},"code_prompt":{"kind":"string","value":"Python Code:\n# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n# Common imports\nimport numpy as np\nimport os\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"ensembles\"\ndef image_path(fig_id):\n return os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id)\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format='png', dpi=300)\nExplanation: Chapter 7 – Ensemble Learning and Random Forests\nThis notebook contains all the sample code and solutions to the exercises in chapter 7.\nSetup\nFirst, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:\nEnd of explanation\nheads_proba = 0.51\ncoin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)\ncumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)\nplt.figure(figsize=(8,3.5))\nplt.plot(cumulative_heads_ratio)\nplt.plot([0, 10000], [0.51, 0.51], \"k--\", linewidth=2, label=\"51%\")\nplt.plot([0, 10000], [0.5, 0.5], \"k-\", label=\"50%\")\nplt.xlabel(\"Number of coin tosses\")\nplt.ylabel(\"Heads ratio\")\nplt.legend(loc=\"lower right\")\nplt.axis([0, 10000, 0.42, 0.58])\nsave_fig(\"law_of_large_numbers_plot\")\nplt.show()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import make_moons\nX, y = make_moons(n_samples=500, noise=0.30, random_state=42)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nlog_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(random_state=42)\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='hard')\nvoting_clf.fit(X_train, y_train)\nfrom sklearn.metrics import accuracy_score\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\nlog_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(probability=True, random_state=42)\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='soft')\nvoting_clf.fit(X_train, y_train)\nfrom sklearn.metrics import accuracy_score\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\nExplanation: Voting classifiers\nEnd of explanation\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, y_pred))\ntree_clf = DecisionTreeClassifier(random_state=42)\ntree_clf.fit(X_train, y_train)\ny_pred_tree = tree_clf.predict(X_test)\nprint(accuracy_score(y_test, y_pred_tree))\nfrom matplotlib.colors import ListedColormap\ndef plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap, linewidth=10)\n if contour:\n custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"yo\", alpha=alpha)\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"bs\", alpha=alpha)\n plt.axis(axes)\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_decision_boundary(tree_clf, X, y)\nplt.title(\"Decision Tree\", fontsize=14)\nplt.subplot(122)\nplot_decision_boundary(bag_clf, X, y)\nplt.title(\"Decision Trees with Bagging\", fontsize=14)\nsave_fig(\"decision_tree_without_and_with_bagging_plot\")\nplt.show()\nExplanation: Bagging ensembles\nEnd of explanation\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(splitter=\"random\", max_leaf_nodes=16, random_state=42),\n n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nfrom sklearn.ensemble import RandomForestClassifier\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1, random_state=42)\nrnd_clf.fit(X_train, y_train)\ny_pred_rf = rnd_clf.predict(X_test)\nnp.sum(y_pred == y_pred_rf) / len(y_pred) # almost identical predictions\nfrom sklearn.datasets import load_iris\niris = load_iris()\nrnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=42)\nrnd_clf.fit(iris[\"data\"], iris[\"target\"])\nfor name, score in zip(iris[\"feature_names\"], rnd_clf.feature_importances_):\n print(name, score)\nrnd_clf.feature_importances_\nplt.figure(figsize=(6, 4))\nfor i in range(15):\n tree_clf = DecisionTreeClassifier(max_leaf_nodes=16, random_state=42 + i)\n indices_with_replacement = np.random.randint(0, len(X_train), len(X_train))\n tree_clf.fit(X[indices_with_replacement], y[indices_with_replacement])\n plot_decision_boundary(tree_clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.02, contour=False)\nplt.show()\nExplanation: Random Forests\nEnd of explanation\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n bootstrap=True, n_jobs=-1, oob_score=True, random_state=40)\nbag_clf.fit(X_train, y_train)\nbag_clf.oob_score_\nbag_clf.oob_decision_function_\nfrom sklearn.metrics import accuracy_score\ny_pred = bag_clf.predict(X_test)\naccuracy_score(y_test, y_pred)\nExplanation: Out-of-Bag evaluation\nEnd of explanation\nfrom sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original')\nrnd_clf = RandomForestClassifier(random_state=42)\nrnd_clf.fit(mnist[\"data\"], mnist[\"target\"])\ndef plot_digit(data):\n image = data.reshape(28, 28)\n plt.imshow(image, cmap = matplotlib.cm.hot,\n interpolation=\"nearest\")\n plt.axis(\"off\")\nplot_digit(rnd_clf.feature_importances_)\ncbar = plt.colorbar(ticks=[rnd_clf.feature_importances_.min(), rnd_clf.feature_importances_.max()])\ncbar.ax.set_yticklabels(['Not important', 'Very important'])\nsave_fig(\"mnist_feature_importance_plot\")\nplt.show()\nExplanation: Feature importance\nEnd of explanation\nfrom sklearn.ensemble import AdaBoostClassifier\nada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)\nplot_decision_boundary(ada_clf, X, y)\nm = len(X_train)\nplt.figure(figsize=(11, 4))\nfor subplot, learning_rate in ((121, 1), (122, 0.5)):\n sample_weights = np.ones(m)\n for i in range(5):\n plt.subplot(subplot)\n svm_clf = SVC(kernel=\"rbf\", C=0.05, random_state=42)\n svm_clf.fit(X_train, y_train, sample_weight=sample_weights)\n y_pred = svm_clf.predict(X_train)\n sample_weights[y_pred != y_train] *= (1 + learning_rate)\n plot_decision_boundary(svm_clf, X, y, alpha=0.2)\n plt.title(\"learning_rate = {}\".format(learning_rate), fontsize=16)\nplt.subplot(121)\nplt.text(-0.7, -0.65, \"1\", fontsize=14)\nplt.text(-0.6, -0.10, \"2\", fontsize=14)\nplt.text(-0.5, 0.10, \"3\", fontsize=14)\nplt.text(-0.4, 0.55, \"4\", fontsize=14)\nplt.text(-0.3, 0.90, \"5\", fontsize=14)\nsave_fig(\"boosting_plot\")\nplt.show()\nlist(m for m in dir(ada_clf) if not m.startswith(\"_\") and m.endswith(\"_\"))\nExplanation: AdaBoost\nEnd of explanation\nnp.random.seed(42)\nX = np.random.rand(100, 1) - 0.5\ny = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)\nfrom sklearn.tree import DecisionTreeRegressor\ntree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg1.fit(X, y)\ny2 = y - tree_reg1.predict(X)\ntree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg2.fit(X, y2)\ny3 = y2 - tree_reg2.predict(X)\ntree_reg3 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg3.fit(X, y3)\nX_new = np.array([[0.8]])\ny_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))\ny_pred\ndef plot_predictions(regressors, X, y, axes, label=None, style=\"r-\", data_style=\"b.\", data_label=None):\n x1 = np.linspace(axes[0], axes[1], 500)\n y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)\n plt.plot(X[:, 0], y, data_style, label=data_label)\n plt.plot(x1, y_pred, style, linewidth=2, label=label)\n if label or data_label:\n plt.legend(loc=\"upper center\", fontsize=16)\n plt.axis(axes)\nplt.figure(figsize=(11,11))\nplt.subplot(321)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h_1(x_1)$\", style=\"g-\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Residuals and tree predictions\", fontsize=16)\nplt.subplot(322)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1)$\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Ensemble predictions\", fontsize=16)\nplt.subplot(323)\nplot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_2(x_1)$\", style=\"g-\", data_style=\"k+\", data_label=\"Residuals\")\nplt.ylabel(\"$y - h_1(x_1)$\", fontsize=16)\nplt.subplot(324)\nplot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1)$\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.subplot(325)\nplot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_3(x_1)$\", style=\"g-\", data_style=\"k+\")\nplt.ylabel(\"$y - h_1(x_1) - h_2(x_1)$\", fontsize=16)\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.subplot(326)\nplot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$\")\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nsave_fig(\"gradient_boosting_plot\")\nplt.show()\nfrom sklearn.ensemble import GradientBoostingRegressor\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0, random_state=42)\ngbrt.fit(X, y)\ngbrt_slow = GradientBoostingRegressor(max_depth=2, n_estimators=200, learning_rate=0.1, random_state=42)\ngbrt_slow.fit(X, y)\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"Ensemble predictions\")\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt.learning_rate, gbrt.n_estimators), fontsize=14)\nplt.subplot(122)\nplot_predictions([gbrt_slow], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt_slow.learning_rate, gbrt_slow.n_estimators), fontsize=14)\nsave_fig(\"gbrt_learning_rate_plot\")\nplt.show()\nExplanation: Gradient Boosting\nEnd of explanation\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nX_train, X_val, y_train, y_val = train_test_split(X, y, random_state=49)\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)\ngbrt.fit(X_train, y_train)\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in gbrt.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors)\ngbrt_best = GradientBoostingRegressor(max_depth=2,n_estimators=bst_n_estimators, random_state=42)\ngbrt_best.fit(X_train, y_train)\nmin_error = np.min(errors)\nplt.figure(figsize=(11, 4))\nplt.subplot(121)\nplt.plot(errors, \"b.-\")\nplt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], \"k--\")\nplt.plot([0, 120], [min_error, min_error], \"k--\")\nplt.plot(bst_n_estimators, min_error, \"ko\")\nplt.text(bst_n_estimators, min_error*1.2, \"Minimum\", ha=\"center\", fontsize=14)\nplt.axis([0, 120, 0, 0.01])\nplt.xlabel(\"Number of trees\")\nplt.title(\"Validation error\", fontsize=14)\nplt.subplot(122)\nplot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"Best model (%d trees)\" % bst_n_estimators, fontsize=14)\nsave_fig(\"early_stopping_gbrt_plot\")\nplt.show()\ngbrt = GradientBoostingRegressor(max_depth=2, warm_start=True, random_state=42)\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\nprint(gbrt.n_estimators)\nExplanation: Gradient Boosting with Early stopping\nEnd of explanation"}}},{"rowIdx":2109,"cells":{"Unnamed: 0":{"kind":"number","value":2109,"string":"2,109"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n AGDCv2 Landsat analytics example using USGS Surface Reflectance\nImport the required libraries\nStep2: Include some helpful functions\nStep3: Plot the spatial extent of our data for each product\nStep4: Inspect the available measurements for each product\nStep5: Specify the Area of Interest for our analysis\nStep6: Load Landsat Surface Reflectance for our Area of Interest\nStep7: Load Landsat Pixel Quality for our area of interest\nStep8: Visualise pixel quality information from our selected spatiotemporal subset\nStep9: Plot the frequency of water classified in pixel quality\nStep10: Plot the timeseries at the center point of the image\nStep11: Remove the cloud and shadow pixels from the surface reflectance\nStep12: Spatiotemporal summary NDVI median\nStep13: NDVI trend over time in cropping area Point Of Interest\nStep14: Create a subset around our point of interest\nStep15: Plot subset image with POI at centre\nStep16: NDVI timeseries plot"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport datacube\nfrom datacube.model import Range\nfrom datetime import datetime\ndc = datacube.Datacube(app='dc-example')\nfrom datacube.storage import masking\nfrom datacube.storage.masking import mask_valid_data as mask_invalid_data\nimport pandas\nimport xarray\nimport numpy\nimport json\nimport vega\nfrom datacube.utils import geometry\nnumpy.seterr(divide='ignore', invalid='ignore')\nimport folium\nfrom IPython.display import display\nimport geopandas\nfrom shapely.geometry import mapping\nfrom shapely.geometry import MultiPolygon\nimport rasterio\nimport shapely.geometry\nimport shapely.ops\nfrom functools import partial\nimport pyproj\nfrom datacube.model import CRS\nfrom datacube.utils import geometry\n## From http://scikit-image.org/docs/dev/auto_examples/plot_equalize.html\nfrom skimage import data, img_as_float\nfrom skimage import exposure\ndatacube.__version__\nExplanation: AGDCv2 Landsat analytics example using USGS Surface Reflectance\nImport the required libraries\nEnd of explanation\ndef datasets_union(dss):\n thing = geometry.unary_union(ds.extent for ds in dss)\n return thing.to_crs(geometry.CRS('EPSG:4326'))\nimport random\ndef plot_folium(shapes):\n mapa = folium.Map(location=[17.38,78.48], zoom_start=8)\n colors=['#00ff00', '#ff0000', '#00ffff', '#ffffff', '#000000', '#ff00ff']\n for shape in shapes:\n style_function = lambda x: {'fillColor': '#000000' if x['type'] == 'Polygon' else '#00ff00', \n 'color' : random.choice(colors)}\n poly = folium.features.GeoJson(mapping(shape), style_function=style_function)\n mapa.add_children(poly)\n display(mapa)\n# determine the clip parameters for a target clear (cloud free image) - identified through the index provided\ndef get_p2_p98(rgb, red, green, blue, index):\n r = numpy.nan_to_num(numpy.array(rgb.data_vars[red][index]))\n g = numpy.nan_to_num(numpy.array(rgb.data_vars[green][index]))\n b = numpy.nan_to_num(numpy.array(rgb.data_vars[blue][index]))\n \n rp2, rp98 = numpy.percentile(r, (2, 99))\n gp2, gp98 = numpy.percentile(g, (2, 99)) \n bp2, bp98 = numpy.percentile(b, (2, 99))\n return(rp2, rp98, gp2, gp98, bp2, bp98)\ndef plot_rgb(rgb, rp2, rp98, gp2, gp98, bp2, bp98, red, green, blue, index):\n r = numpy.nan_to_num(numpy.array(rgb.data_vars[red][index]))\n g = numpy.nan_to_num(numpy.array(rgb.data_vars[green][index]))\n b = numpy.nan_to_num(numpy.array(rgb.data_vars[blue][index]))\n r_rescale = exposure.rescale_intensity(r, in_range=(rp2, rp98))\n g_rescale = exposure.rescale_intensity(g, in_range=(gp2, gp98))\n b_rescale = exposure.rescale_intensity(b, in_range=(bp2, bp98))\n rgb_stack = numpy.dstack((r_rescale,g_rescale,b_rescale))\n img = img_as_float(rgb_stack)\n return(img)\ndef plot_water_pixel_drill(water_drill):\n vega_data = [{'x': str(ts), 'y': str(v)} for ts, v in zip(water_drill.time.values, water_drill.values)]\n vega_spec = {\"width\":720,\"height\":90,\"padding\":{\"top\":10,\"left\":80,\"bottom\":60,\"right\":30},\"data\":[{\"name\":\"wofs\",\"values\":[{\"code\":0,\"class\":\"dry\",\"display\":\"Dry\",\"color\":\"#D99694\",\"y_top\":30,\"y_bottom\":50},{\"code\":1,\"class\":\"nodata\",\"display\":\"No Data\",\"color\":\"#A0A0A0\",\"y_top\":60,\"y_bottom\":80},{\"code\":2,\"class\":\"shadow\",\"display\":\"Shadow\",\"color\":\"#A0A0A0\",\"y_top\":60,\"y_bottom\":80},{\"code\":4,\"class\":\"cloud\",\"display\":\"Cloud\",\"color\":\"#A0A0A0\",\"y_top\":60,\"y_bottom\":80},{\"code\":1,\"class\":\"wet\",\"display\":\"Wet\",\"color\":\"#4F81BD\",\"y_top\":0,\"y_bottom\":20},{\"code\":3,\"class\":\"snow\",\"display\":\"Snow\",\"color\":\"#4F81BD\",\"y_top\":0,\"y_bottom\":20},{\"code\":255,\"class\":\"fill\",\"display\":\"Fill\",\"color\":\"#4F81BD\",\"y_top\":0,\"y_bottom\":20}]},{\"name\":\"table\",\"format\":{\"type\":\"json\",\"parse\":{\"x\":\"date\"}},\"values\":[],\"transform\":[{\"type\":\"lookup\",\"on\":\"wofs\",\"onKey\":\"code\",\"keys\":[\"y\"],\"as\":[\"class\"],\"default\":null},{\"type\":\"filter\",\"test\":\"datum.y != 255\"}]}],\"scales\":[{\"name\":\"x\",\"type\":\"time\",\"range\":\"width\",\"domain\":{\"data\":\"table\",\"field\":\"x\"},\"round\":true},{\"name\":\"y\",\"type\":\"ordinal\",\"range\":\"height\",\"domain\":[\"water\",\"not water\",\"not observed\"],\"nice\":true}],\"axes\":[{\"type\":\"x\",\"scale\":\"x\",\"formatType\":\"time\"},{\"type\":\"y\",\"scale\":\"y\",\"tickSize\":0}],\"marks\":[{\"description\":\"data plot\",\"type\":\"rect\",\"from\":{\"data\":\"table\"},\"properties\":{\"enter\":{\"xc\":{\"scale\":\"x\",\"field\":\"x\"},\"width\":{\"value\":\"1\"},\"y\":{\"field\":\"class.y_top\"},\"y2\":{\"field\":\"class.y_bottom\"},\"fill\":{\"field\":\"class.color\"},\"strokeOpacity\":{\"value\":\"0\"}}}}]}\n spec_obj = json.loads(vega_spec)\n spec_obj['data'][1]['values'] = vega_data\n return vega.Vega(spec_obj)\nExplanation: Include some helpful functions\nEnd of explanation\nplot_folium([datasets_union(dc.index.datasets.search_eager(product='ls5_ledaps_scene')),\\\n datasets_union(dc.index.datasets.search_eager(product='ls7_ledaps_scene')),\\\n datasets_union(dc.index.datasets.search_eager(product='ls8_ledaps_scene'))])\nExplanation: Plot the spatial extent of our data for each product\nEnd of explanation\ndc.list_measurements()\nExplanation: Inspect the available measurements for each product\nEnd of explanation\n# Hyderbad\n# 'lon': (78.40, 78.57),\n# 'lat': (17.36, 17.52),\n# Lake Singur\n# 'lat': (17.67, 17.84),\n# 'lon': (77.83, 78.0),\n# Lake Singur Dam\nquery = {\n 'lat': (17.72, 17.79),\n 'lon': (77.88, 77.95),\n}\nExplanation: Specify the Area of Interest for our analysis\nEnd of explanation\nproducts = ['ls5_ledaps_scene','ls7_ledaps_scene','ls8_ledaps_scene']\ndatasets = []\nfor product in products:\n ds = dc.load(product=product, measurements=['nir','red', 'green','blue'], output_crs='EPSG:32644',resolution=(-30,30), **query)\n ds['product'] = ('time', numpy.repeat(product, ds.time.size))\n datasets.append(ds)\nsr = xarray.concat(datasets, dim='time')\nsr = sr.isel(time=sr.time.argsort()) # sort along time dim\nsr = sr.where(sr != -9999)\n##### include an index here for the timeslice with representative data for best stretch of time series\n# don't run this to keep the same limits as the previous sensor\n#rp2, rp98, gp2, gp98, bp2, bp98 = get_p2_p98(sr,'red','green','blue', 0)\nrp2, rp98, gp2, gp98, bp2, bp98 = (300.0, 2000.0, 300.0, 2000.0, 300.0, 2000.0)\nprint(rp2, rp98, gp2, gp98, bp2, bp98)\nplt.imshow(plot_rgb(sr,rp2, rp98, gp2, gp98, bp2, bp98,'red',\n 'green', 'blue', 0),interpolation='nearest')\nExplanation: Load Landsat Surface Reflectance for our Area of Interest\nEnd of explanation\ndatasets = []\nfor product in products:\n ds = dc.load(product=product, measurements=['cfmask'], output_crs='EPSG:32644',resolution=(-30,30), **query).cfmask\n ds['product'] = ('time', numpy.repeat(product, ds.time.size))\n datasets.append(ds)\npq = xarray.concat(datasets, dim='time')\npq = pq.isel(time=pq.time.argsort()) # sort along time dim\ndel(datasets)\nExplanation: Load Landsat Pixel Quality for our area of interest\nEnd of explanation\npq.attrs['flags_definition'] = {'cfmask': {'values': {'255': 'fill', '1': 'water', '2': 'shadow', '3': 'snow', '4': 'cloud', '0': 'clear'}, 'description': 'CFmask', 'bits': [0, 1, 2, 3, 4, 5, 6, 7]}}\npandas.DataFrame.from_dict(masking.get_flags_def(pq), orient='index')\nExplanation: Visualise pixel quality information from our selected spatiotemporal subset\nEnd of explanation\nwater = masking.make_mask(pq, cfmask ='water')\nwater.sum('time').plot(cmap='nipy_spectral')\nExplanation: Plot the frequency of water classified in pixel quality\nEnd of explanation\nplot_water_pixel_drill(pq.isel(y=int(water.shape[1] / 2), x=int(water.shape[2] / 2)))\ndel(water)\nExplanation: Plot the timeseries at the center point of the image\nEnd of explanation\nmask = masking.make_mask(pq, cfmask ='cloud')\nmask = abs(mask*-1+1)\nsr = sr.where(mask)\nmask = masking.make_mask(pq, cfmask ='shadow')\nmask = abs(mask*-1+1)\nsr = sr.where(mask)\ndel(mask)\ndel(pq)\nsr.attrs['crs'] = CRS('EPSG:32644')\nExplanation: Remove the cloud and shadow pixels from the surface reflectance\nEnd of explanation\nndvi_median = ((sr.nir-sr.red)/(sr.nir+sr.red)).median(dim='time')\nndvi_median.attrs['crs'] = CRS('EPSG:32644')\nndvi_median.plot(cmap='YlGn', robust='True')\nExplanation: Spatiotemporal summary NDVI median\nEnd of explanation\npoi_latitude = 17.749343\npoi_longitude = 77.935634\np = geometry.point(x=poi_longitude, y=poi_latitude, crs=geometry.CRS('EPSG:4326')).to_crs(sr.crs)\nExplanation: NDVI trend over time in cropping area Point Of Interest\nEnd of explanation\nsubset = sr.sel(x=((sr.x > p.points[0][0]-1000)), y=((sr.y < p.points[0][1]+1000)))\nsubset = subset.sel(x=((subset.x < p.points[0][0]+1000)), y=((subset.y > p.points[0][1]-1000)))\nExplanation: Create a subset around our point of interest\nEnd of explanation\nplt.imshow(plot_rgb(subset,rp2, rp98, gp2, gp98, bp2, bp98,'red',\n 'green', 'blue',0),interpolation='nearest' )\nExplanation: Plot subset image with POI at centre\nEnd of explanation\n((sr.nir-sr.red)/(sr.nir+sr.red)).sel(x=p.points[0][0], y=p.points[0][1], method='nearest').plot(marker='o')\nExplanation: NDVI timeseries plot\nEnd of explanation"}}},{"rowIdx":2110,"cells":{"Unnamed: 0":{"kind":"number","value":2110,"string":"2,110"},"text_prompt":{"kind":"string","value":"Given the following text problem statement, write Python code to implement the functionality described below in problem statement\nProblem:\nI am having a problem with minimization procedure. Actually, I could not create a correct objective function for my problem."},"code_prompt":{"kind":"string","value":"Problem:\nimport scipy.optimize\nimport numpy as np\nnp.random.seed(42)\na = np.random.rand(3,5)\nx_true = np.array([10, 13, 5, 8, 40])\ny = a.dot(x_true ** 2)\nx0 = np.array([2, 3, 1, 4, 20])\nx_lower_bounds = x_true / 2\ndef residual_ans(x, a, y):\n s = ((y - a.dot(x**2))**2).sum()\n return s\nbounds = [[x, None] for x in x_lower_bounds]\nout = scipy.optimize.minimize(residual_ans, x0=x0, args=(a, y), method= 'L-BFGS-B', bounds=bounds).x"}}},{"rowIdx":2111,"cells":{"Unnamed: 0":{"kind":"number","value":2111,"string":"2,111"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Logistic Regression Classification\nStep1: Utility function to create the appropriate data frame for classification algorithms in MLlib\nStep2: create the dataframe from a csv\nStep3: Classification algorithms requires numeric values for labels\nStep4: schema verification\nStep5: Instantiate the Logistic Regression and the pipeline.\nStep6: We use a ParamGridBuilder to construct a grid of parameters to search over.\nTrainValidationSplit will try all combinations of values and determine best model using the evaluator.\nStep7: In this case the estimator is simply the linear regression.\nA TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.\nStep8: Fit the pipeline to training documents.\nStep9: Compute the predictions from the model"},"code_prompt":{"kind":"string","value":"Python Code:\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml import Pipeline\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.mllib.evaluation import MulticlassMetrics\nfrom pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit\nExplanation: Logistic Regression Classification\nEnd of explanation\ndef mapLibSVM(row): \n return (row[5],Vectors.dense(row[:3]))\nExplanation: Utility function to create the appropriate data frame for classification algorithms in MLlib\nEnd of explanation\ndf = spark.read \\\n .format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .load(\"datasets/iris.data\")\nExplanation: create the dataframe from a csv\nEnd of explanation\nindexer = StringIndexer(inputCol=\"label\", outputCol=\"labelIndex\")\nindexer = indexer.fit(df).transform(df)\nindexer.show()\ndfLabeled = indexer.rdd.map(mapLibSVM).toDF([\"label\", \"features\"])\ndfLabeled.show()\ntrain, test = dfLabeled.randomSplit([0.9, 0.1], seed=12345)\nExplanation: Classification algorithms requires numeric values for labels\nEnd of explanation\ntrain.printSchema()\nExplanation: schema verification\nEnd of explanation\nlr = LogisticRegression(labelCol=\"label\", maxIter=10)\nExplanation: Instantiate the Logistic Regression and the pipeline.\nEnd of explanation\nparamGrid = ParamGridBuilder()\\\n .addGrid(lr.regParam, [0.1, 0.001]) \\\n .build()\nExplanation: We use a ParamGridBuilder to construct a grid of parameters to search over.\nTrainValidationSplit will try all combinations of values and determine best model using the evaluator.\nEnd of explanation\ntvs = TrainValidationSplit(estimator=lr,\n estimatorParamMaps=paramGrid,\n evaluator=RegressionEvaluator(),\n # 80% of the data will be used for training, 20% for validation.\n trainRatio=0.9)\nExplanation: In this case the estimator is simply the linear regression.\nA TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.\nEnd of explanation\nmodel = tvs.fit(train)\nExplanation: Fit the pipeline to training documents.\nEnd of explanation\nresult = model.transform(test)\npredictions = result.select([\"prediction\", \"label\"])\npredictions.show()\n# Instantiate metrics object\nmetrics = MulticlassMetrics(predictions.rdd)\n# Overall statistics\nprint(\"Summary Stats\")\nprint(\"Precision = %s\" % metrics.precision())\nprint(\"Recall = %s\" % metrics.recall())\nprint(\"F1 Score = %s\" % metrics.fMeasure())\nprint(\"Accuracy = %s\" % metrics.accuracy)\n# Weighted stats\nprint(\"Weighted recall = %s\" % metrics.weightedRecall)\nprint(\"Weighted precision = %s\" % metrics.weightedPrecision)\nprint(\"Weighted F(1) Score = %s\" % metrics.weightedFMeasure())\nprint(\"Weighted F(0.5) Score = %s\" % metrics.weightedFMeasure(beta=0.5))\nprint(\"Weighted false positive rate = %s\" % metrics.weightedFalsePositiveRate)\nExplanation: Compute the predictions from the model\nEnd of explanation"}}},{"rowIdx":2112,"cells":{"Unnamed: 0":{"kind":"number","value":2112,"string":"2,112"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Subject Selection Experiments disorder data - Srinivas (handle\nStep1: Extracting the samples we are interested in\nStep2: Dimensionality reduction\nManifold Techniques\nISOMAP\nStep3: Clustering and other grouping experiments\nK-Means clustering - iso\nStep4: As is evident from the above 2 experiments, no clear clustering is apparent.But there is some significant overlap and there 2 clear groups\nClassification Experiments\nLet's experiment with a bunch of classifiers"},"code_prompt":{"kind":"string","value":"Python Code:\n# Standard\nimport pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\n# Dimensionality reduction and Clustering\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\nfrom sklearn import manifold, datasets\nfrom itertools import cycle\n# Plotting tools and classifiers\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA\nfrom sklearn import cross_validation\nfrom sklearn.cross_validation import LeaveOneOut\n# Let's read the data in and clean it\ndef get_NaNs(df):\n columns = list(df.columns.get_values()) \n row_metrics = df.isnull().sum(axis=1)\n rows_with_na = []\n for i, x in enumerate(row_metrics):\n if x > 0: rows_with_na.append(i)\n return rows_with_na\ndef remove_NaNs(df):\n rows_with_na = get_NaNs(df)\n cleansed_df = df.drop(df.index[rows_with_na], inplace=False) \n return cleansed_df\ninitial_data = pd.DataFrame.from_csv('Data_Adults_1_reduced_inv2.csv')\ncleansed_df = remove_NaNs(initial_data)\n# Let's also get rid of nominal data\nnumerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\nX = cleansed_df.select_dtypes(include=numerics)\nprint X.shape\n# Let's now clean columns getting rid of certain columns that might not be important to our analysis\ncols2drop = ['GROUP_ID', 'doa', 'Baseline_header_id', 'Concentration_header_id']\nX = X.drop(cols2drop, axis=1, inplace=False)\nprint X.shape\n# For our studies children skew the data, it would be cleaner to just analyse adults\nX = X.loc[X['Age'] >= 18]\nY = X.loc[X['race_id'] == 1]\nX = X.loc[X['Gender_id'] == 1]\nprint X.shape\nprint Y.shape\nExplanation: Subject Selection Experiments disorder data - Srinivas (handle: thewickedaxe)\nInitial Data Cleaning\nEnd of explanation\n# Let's extract ADHd and Bipolar patients (mutually exclusive)\nADHD_men = X.loc[X['ADHD'] == 1]\nADHD_men = ADHD_men.loc[ADHD_men['Bipolar'] == 0]\nBP_men = X.loc[X['Bipolar'] == 1]\nBP_men = BP_men.loc[BP_men['ADHD'] == 0]\nADHD_cauc = Y.loc[Y['ADHD'] == 1]\nADHD_cauc = ADHD_cauc.loc[ADHD_cauc['Bipolar'] == 0]\nBP_cauc = Y.loc[Y['Bipolar'] == 1]\nBP_cauc = BP_cauc.loc[BP_cauc['ADHD'] == 0]\nprint ADHD_men.shape\nprint BP_men.shape\nprint ADHD_cauc.shape\nprint BP_cauc.shape\n# Keeping a backup of the data frame object because numpy arrays don't play well with certain scikit functions\nADHD_men = pd.DataFrame(ADHD_men.drop(['Patient_ID', 'Gender_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))\nBP_men = pd.DataFrame(BP_men.drop(['Patient_ID', 'Gender_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))\nADHD_cauc = pd.DataFrame(ADHD_cauc.drop(['Patient_ID', 'race_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))\nBP_cauc = pd.DataFrame(BP_cauc.drop(['Patient_ID', 'race_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))\nExplanation: Extracting the samples we are interested in\nEnd of explanation\ncombined1 = pd.concat([ADHD_men, BP_men])\ncombined2 = pd.concat([ADHD_cauc, BP_cauc])\nprint combined1.shape\nprint combined2.shape\ncombined1 = preprocessing.scale(combined1)\ncombined2 = preprocessing.scale(combined2)\ncombined1 = manifold.Isomap(20, 20).fit_transform(combined1)\nADHD_men_iso = combined1[:1326]\nBP_men_iso = combined1[1326:]\ncombined2 = manifold.Isomap(20, 20).fit_transform(combined2)\nADHD_cauc_iso = combined2[:1379]\nBP_cauc_iso = combined2[1379:]\nExplanation: Dimensionality reduction\nManifold Techniques\nISOMAP\nEnd of explanation\ndata1 = pd.concat([pd.DataFrame(ADHD_men_iso), pd.DataFrame(BP_men_iso)])\ndata2 = pd.concat([pd.DataFrame(ADHD_cauc_iso), pd.DataFrame(BP_cauc_iso)])\nprint data1.shape\nprint data2.shape\nkmeans = KMeans(n_clusters=2)\nkmeans.fit(data1.get_values())\nlabels1 = kmeans.labels_\ncentroids1 = kmeans.cluster_centers_\nprint('Estimated number of clusters: %d' % len(centroids1))\nfor label in [0, 1]:\n ds = data1.get_values()[np.where(labels1 == label)] \n plt.plot(ds[:,0], ds[:,1], '.') \n lines = plt.plot(centroids1[label,0], centroids1[label,1], 'o')\nkmeans = KMeans(n_clusters=2)\nkmeans.fit(data2.get_values())\nlabels2 = kmeans.labels_\ncentroids2 = kmeans.cluster_centers_\nprint('Estimated number of clusters: %d' % len(centroids2))\nfor label in [0, 1]:\n ds2 = data2.get_values()[np.where(labels2 == label)] \n plt.plot(ds2[:,0], ds2[:,1], '.') \n lines = plt.plot(centroids2[label,0], centroids2[label,1], 'o')\nExplanation: Clustering and other grouping experiments\nK-Means clustering - iso\nEnd of explanation\nADHD_men_iso = pd.DataFrame(ADHD_men_iso)\nBP_men_iso = pd.DataFrame(BP_men_iso)\nADHD_cauc_iso = pd.DataFrame(ADHD_cauc_iso)\nBP_cauc_iso = pd.DataFrame(BP_cauc_iso)\nBP_men_iso['ADHD-Bipolar'] = 0\nADHD_men_iso['ADHD-Bipolar'] = 1\nBP_cauc_iso['ADHD-Bipolar'] = 0\nADHD_cauc_iso['ADHD-Bipolar'] = 1\ndata1 = pd.concat([ADHD_men_iso, BP_men_iso])\ndata2 = pd.concat([ADHD_cauc_iso, BP_cauc_iso])\nclass_labels1 = data1['ADHD-Bipolar']\nclass_labels2 = data2['ADHD-Bipolar']\ndata1 = data1.drop(['ADHD-Bipolar'], axis = 1, inplace = False)\ndata2 = data2.drop(['ADHD-Bipolar'], axis = 1, inplace = False)\ndata1 = data1.get_values()\ndata2 = data2.get_values()\n# Leave one Out cross validation\ndef leave_one_out(classifier, values, labels):\n leave_one_out_validator = LeaveOneOut(len(values))\n classifier_metrics = cross_validation.cross_val_score(classifier, values, labels, cv=leave_one_out_validator)\n accuracy = classifier_metrics.mean()\n deviation = classifier_metrics.std()\n return accuracy, deviation\nrf = RandomForestClassifier(n_estimators = 22) \nqda = QDA()\nlda = LDA()\ngnb = GaussianNB()\nclassifier_accuracy_list = []\nclassifiers = [(rf, \"Random Forest\"), (lda, \"LDA\"), (qda, \"QDA\"), (gnb, \"Gaussian NB\")]\nfor classifier, name in classifiers:\n accuracy, deviation = leave_one_out(classifier, data1, class_labels1)\n print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation)\n classifier_accuracy_list.append((name, accuracy))\nfor classifier, name in classifiers:\n accuracy, deviation = leave_one_out(classifier, data2, class_labels2)\n print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation)\n classifier_accuracy_list.append((name, accuracy))\nExplanation: As is evident from the above 2 experiments, no clear clustering is apparent.But there is some significant overlap and there 2 clear groups\nClassification Experiments\nLet's experiment with a bunch of classifiers\nEnd of explanation"}}},{"rowIdx":2113,"cells":{"Unnamed: 0":{"kind":"number","value":2113,"string":"2,113"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Author\nStep1: First let's check if there are new or deleted files (only matching by file names).\nStep2: So we have the same set of files in both versions\nStep3: Let's make sure the structure hasn't changed\nStep4: All files have the same columns as before\nStep5: There are some minor changes in many files, but based on my knowledge of ROME, none from the main files.\nThe most interesting ones are in referentiel_appellation, item, and liens_rome_referentiels, so let's see more precisely.\nStep6: Alright, so the only change seems to be 4 new jobs added. Let's take a look (only showing interesting fields)\nStep7: These seems to be refinements of existing jobs, and two new ones are related to digital mediation.\nOK, let's check at the changes in items\nStep8: As anticipated it is a very minor change (hard to see it visually)\nStep9: The new ones seem legit to me.\nThe changes in liens_rome_referentiels include changes for those items, so let's only check the changes not related to those.\nStep10: So in addition to the added items, there are few fixes. Let's have a look at them"},"code_prompt":{"kind":"string","value":"Python Code:\nimport collections\nimport glob\nimport os\nfrom os import path\nimport matplotlib_venn\nimport pandas as pd\nrome_path = path.join(os.getenv('DATA_FOLDER'), 'rome/csv')\nOLD_VERSION = '338'\nNEW_VERSION = '339'\nold_version_files = frozenset(glob.glob(rome_path + '/*{}*'.format(OLD_VERSION)))\nnew_version_files = frozenset(glob.glob(rome_path + '/*{}*'.format(NEW_VERSION)))\nExplanation: Author: Pascal, pascal@bayesimpact.org\nDate: 2019-06-25\nROME update from v338 to v339\nIn June 2019 a new version of the ROME was released. I want to investigate what changed and whether we need to do anything about it.\nYou might not be able to reproduce this notebook, mostly because it requires to have the two versions of the ROME in your data/rome/csv folder which happens only just before we switch to v339. You will have to trust me on the results ;-)\nSkip the run test because it requires older versions of the ROME.\nEnd of explanation\nnew_files = new_version_files - frozenset(f.replace(OLD_VERSION, NEW_VERSION) for f in old_version_files)\ndeleted_files = old_version_files - frozenset(f.replace(NEW_VERSION, OLD_VERSION) for f in new_version_files)\nprint('{:d} new files'.format(len(new_files)))\nprint('{:d} deleted files'.format(len(deleted_files)))\nExplanation: First let's check if there are new or deleted files (only matching by file names).\nEnd of explanation\n# Load all ROME datasets for the two versions we compare.\nVersionedDataset = collections.namedtuple('VersionedDataset', ['basename', 'old', 'new'])\nrome_data = [VersionedDataset(\n basename=path.basename(f),\n old=pd.read_csv(f.replace(NEW_VERSION, OLD_VERSION)),\n new=pd.read_csv(f))\n for f in sorted(new_version_files)]\ndef find_rome_dataset_by_name(data, partial_name):\n for dataset in data:\n if 'unix_{}_v{}_utf8.csv'.format(partial_name, NEW_VERSION) == dataset.basename:\n return dataset\n raise ValueError('No dataset named {}, the list is\\n{}'.format(partial_name, [d.basename for d in data]))\nExplanation: So we have the same set of files in both versions: good start.\nNow let's set up a dataset that, for each table, links both the old and the new file together.\nEnd of explanation\nfor dataset in rome_data:\n if set(dataset.old.columns) != set(dataset.new.columns):\n print('Columns of {} have changed.'.format(dataset.basename))\nExplanation: Let's make sure the structure hasn't changed:\nEnd of explanation\nsame_row_count_files = 0\nfor dataset in rome_data:\n diff = len(dataset.new.index) - len(dataset.old.index)\n if diff > 0:\n print('{:d}/{:d} values added in {}'.format(\n diff, len(dataset.new.index), dataset.basename))\n elif diff < 0:\n print('{:d}/{:d} values removed in {}'.format(\n -diff, len(dataset.old.index), dataset.basename))\n else:\n same_row_count_files += 1\nprint('{:d}/{:d} files with the same number of rows'.format(\n same_row_count_files, len(rome_data)))\nExplanation: All files have the same columns as before: still good.\nNow let's see for each file if there are more or less rows.\nEnd of explanation\njobs = find_rome_dataset_by_name(rome_data, 'referentiel_appellation')\nnew_jobs = set(jobs.new.code_ogr) - set(jobs.old.code_ogr)\nobsolete_jobs = set(jobs.old.code_ogr) - set(jobs.new.code_ogr)\nstable_jobs = set(jobs.new.code_ogr) & set(jobs.old.code_ogr)\nmatplotlib_venn.venn2((len(obsolete_jobs), len(new_jobs), len(stable_jobs)), (OLD_VERSION, NEW_VERSION));\nExplanation: There are some minor changes in many files, but based on my knowledge of ROME, none from the main files.\nThe most interesting ones are in referentiel_appellation, item, and liens_rome_referentiels, so let's see more precisely.\nEnd of explanation\npd.options.display.max_colwidth = 2000\njobs.new[jobs.new.code_ogr.isin(new_jobs)][['code_ogr', 'libelle_appellation_long', 'code_rome']]\nExplanation: Alright, so the only change seems to be 4 new jobs added. Let's take a look (only showing interesting fields):\nEnd of explanation\nitems = find_rome_dataset_by_name(rome_data, 'item')\nnew_items = set(items.new.code_ogr) - set(items.old.code_ogr)\nobsolete_items = set(items.old.code_ogr) - set(items.new.code_ogr)\nstable_items = set(items.new.code_ogr) & set(items.old.code_ogr)\nmatplotlib_venn.venn2((len(obsolete_items), len(new_items), len(stable_items)), (OLD_VERSION, NEW_VERSION));\nExplanation: These seems to be refinements of existing jobs, and two new ones are related to digital mediation.\nOK, let's check at the changes in items:\nEnd of explanation\nitems.new[items.new.code_ogr.isin(new_items)].head()\nExplanation: As anticipated it is a very minor change (hard to see it visually): there are 4 new ones have been created. Let's have a look at them.\nEnd of explanation\nlinks = find_rome_dataset_by_name(rome_data, 'liens_rome_referentiels')\nold_links_on_stable_items = links.old[links.old.code_ogr.isin(stable_items)]\nnew_links_on_stable_items = links.new[links.new.code_ogr.isin(stable_items)]\nold = old_links_on_stable_items[['code_rome', 'code_ogr']]\nnew = new_links_on_stable_items[['code_rome', 'code_ogr']]\nlinks_merged = old.merge(new, how='outer', indicator=True)\nlinks_merged['_diff'] = links_merged._merge.map({'left_only': 'removed', 'right_only': 'added'})\nlinks_merged._diff.value_counts()\nExplanation: The new ones seem legit to me.\nThe changes in liens_rome_referentiels include changes for those items, so let's only check the changes not related to those.\nEnd of explanation\njob_group_names = find_rome_dataset_by_name(rome_data, 'referentiel_code_rome').new.set_index('code_rome').libelle_rome\nitem_names = items.new.set_index('code_ogr').libelle.drop_duplicates()\nlinks_merged['job_group_name'] = links_merged.code_rome.map(job_group_names)\nlinks_merged['item_name'] = links_merged.code_ogr.map(item_names)\ndisplay(links_merged[links_merged._diff == 'removed'].dropna().head(5))\nlinks_merged[links_merged._diff == 'added'].dropna().head(5)\nExplanation: So in addition to the added items, there are few fixes. Let's have a look at them:\nEnd of explanation"}}},{"rowIdx":2114,"cells":{"Unnamed: 0":{"kind":"number","value":2114,"string":"2,114"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n \"What just happened???\"\nHere we take an existing modflow model and setup a very complex parameterization system for arrays and boundary conditions. All parameters are setup as multpliers\nStep1: You want some pilot points? We got that...how about one set of recharge multiplier pilot points applied to all stress periods? and sy in layer 1?\nStep2: Parameterization\nStep3: You want some constants (uniform value multipliers)? We got that too....\nStep4: You want grid-scale parameter flexibility for hk in all layers? We got that too...and how about sy in layer 1 and vka in layer 2 while we are at it\nStep5: Some people like using zones...so we have those too\nStep6: But wait, boundary conditions are uncertain too...Can we add some parameter to represent that uncertainty? You know it!\nStep7: Observations\nSince observations are \"free\", we can carry lots of them around...\nStep8: Here it goes...\nNow we will use all these args to construct a complete PEST interface - template files, instruction files, control file and even the forward run script! All parameters are setup as multiplers against the existing inputs in the modflow model - the existing inputs are extracted (with flopy) and saved in a sub directory for safe keep and for multiplying against during a forward model run. The constructor will also write a full (covariates included) prior parameter covariance matrix, which is needed for all sorts of important analyses.|\nStep9: The mpf_boss instance containts a pyemu.Pst object (its already been saved to a file, but you may want to manipulate it more)\nStep10: That was crazy easy - this used to take me weeks to get a PEST interface setup with level of complexity\nStep11: Lets look at that important prior covariance matrix\nStep12: adjusting parameter bounds\nLet's say you don't like the parameter bounds in the new control file (note you can pass a par_bounds arg to the constructor).\nStep13: Let's change the welflux pars\nStep14: Boom!"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport os\nimport platform\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport flopy\nimport pyemu\nnam_file = \"freyberg.nam\"\norg_model_ws = \"freyberg_sfr_update\"\ntemp_model_ws = \"temp\"\nnew_model_ws = \"template\"\n# load the model, change dir and run once just to make sure everthing is working\nm = flopy.modflow.Modflow.load(nam_file,model_ws=org_model_ws,check=False, exe_name=\"mfnwt\",\n forgive=False,verbose=True)\nm.change_model_ws(temp_model_ws,reset_external=True)\nm.write_input()\nEXE_DIR = os.path.join(\"..\",\"bin\")\nif \"window\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"win\")\nelif \"darwin\" in platform.platform().lower() or \"macos\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"mac\")\nelse:\n EXE_DIR = os.path.join(EXE_DIR,\"linux\")\n[shutil.copy2(os.path.join(EXE_DIR,f),os.path.join(temp_model_ws,f)) for f in os.listdir(EXE_DIR)]\ntry:\n m.run_model()\nexcept():\n pass\nExplanation: \"What just happened???\"\nHere we take an existing modflow model and setup a very complex parameterization system for arrays and boundary conditions. All parameters are setup as multpliers: the original inputs from the modflow model are saved in separate files and during the forward run, they are multplied by the parameters to form new model inputs. the forward run script (\"forward_run.py\") is also written. And somewhat meaningful prior covariance matrix is constructed from geostatistical structures with out any additional arguements...oh yeah!\nEnd of explanation\nm.get_package_list()\nExplanation: You want some pilot points? We got that...how about one set of recharge multiplier pilot points applied to all stress periods? and sy in layer 1?\nEnd of explanation\npp_props = [[\"upw.sy\",0], [\"rch.rech\",None]]\nExplanation: Parameterization\nEnd of explanation\nconst_props = []\nfor iper in range(m.nper): # recharge for past and future\n const_props.append([\"rch.rech\",iper])\nfor k in range(m.nlay):\n const_props.append([\"upw.hk\",k])\n const_props.append([\"upw.ss\",k])\nExplanation: You want some constants (uniform value multipliers)? We got that too....\nEnd of explanation\ngrid_props = [[\"upw.sy\",0],[\"upw.vka\",1]]\nfor k in range(m.nlay):\n grid_props.append([\"upw.hk\",k])\n \nExplanation: You want grid-scale parameter flexibility for hk in all layers? We got that too...and how about sy in layer 1 and vka in layer 2 while we are at it\nEnd of explanation\nzn_array = np.loadtxt(os.path.join(\"Freyberg_Truth\",\"hk.zones\"))\nplt.imshow(zn_array)\nzone_props = [[\"upw.ss\",0], [\"rch.rech\",0],[\"rch.rech\",1]]\nk_zone_dict = {k:zn_array for k in range(m.nlay)}\nExplanation: Some people like using zones...so we have those too\nEnd of explanation\nbc_props = []\nfor iper in range(m.nper):\n bc_props.append([\"wel.flux\",iper])\nExplanation: But wait, boundary conditions are uncertain too...Can we add some parameter to represent that uncertainty? You know it!\nEnd of explanation\n# here were are building a list of stress period, layer pairs (zero-based) that we will use\n# to setup obserations from every active model cell for a given pair\nhds_kperk = []\nfor iper in range(m.nper):\n for k in range(m.nlay):\n hds_kperk.append([iper,k])\nExplanation: Observations\nSince observations are \"free\", we can carry lots of them around...\nEnd of explanation\nmfp_boss = pyemu.helpers.PstFromFlopyModel(nam_file,new_model_ws,org_model_ws=temp_model_ws,\n pp_props=pp_props,spatial_list_props=bc_props,\n zone_props=zone_props,grid_props=grid_props,\n const_props=const_props,k_zone_dict=k_zone_dict,\n remove_existing=True,pp_space=4,sfr_pars=True,\n sfr_obs=True,hds_kperk=hds_kperk)\nEXE_DIR = os.path.join(\"..\",\"bin\")\nif \"window\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"win\")\nelif \"darwin\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"mac\")\nelse:\n EXE_DIR = os.path.join(EXE_DIR,\"linux\")\n[shutil.copy2(os.path.join(EXE_DIR,f),os.path.join(new_model_ws,f)) for f in os.listdir(EXE_DIR)]\nExplanation: Here it goes...\nNow we will use all these args to construct a complete PEST interface - template files, instruction files, control file and even the forward run script! All parameters are setup as multiplers against the existing inputs in the modflow model - the existing inputs are extracted (with flopy) and saved in a sub directory for safe keep and for multiplying against during a forward model run. The constructor will also write a full (covariates included) prior parameter covariance matrix, which is needed for all sorts of important analyses.|\nEnd of explanation\npst = mfp_boss.pst\npst.npar,pst.nobs\nExplanation: The mpf_boss instance containts a pyemu.Pst object (its already been saved to a file, but you may want to manipulate it more)\nEnd of explanation\npst.template_files\npst.instruction_files\nExplanation: That was crazy easy - this used to take me weeks to get a PEST interface setup with level of complexity\nEnd of explanation\ncov = pyemu.Cov.from_ascii(os.path.join(new_model_ws,m.name+\".pst.prior.cov\"))\ncov = cov.x\ncov[cov==0] = np.NaN\nplt.imshow(cov)\nExplanation: Lets look at that important prior covariance matrix\nEnd of explanation\npst.parameter_data\nExplanation: adjusting parameter bounds\nLet's say you don't like the parameter bounds in the new control file (note you can pass a par_bounds arg to the constructor).\nEnd of explanation\npar = pst.parameter_data #get a ref to the parameter data dataframe\nwpars = par.pargp==\"welflux_k02\"\npar.loc[wpars]\npar.loc[wpars,\"parubnd\"] = 1.1\npar.loc[wpars,\"parlbnd\"] = 0.9\npst.parameter_data\n# now we need to rebuild the prior parameter covariance matrix\ncov = mfp_boss.build_prior()\nExplanation: Let's change the welflux pars\nEnd of explanation\nx = cov.x\nx[x==0.0] = np.NaN\nplt.imshow(x)\nExplanation: Boom!\nEnd of explanation"}}},{"rowIdx":2115,"cells":{"Unnamed: 0":{"kind":"number","value":2115,"string":"2,115"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n SMOGN (0.1.0)\nStep1: Dependencies\nNext, we load the required dependencies. Here we import smogn to later apply Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise. In addition, we use pandas for data handling, and seaborn to visualize our results.\nStep2: Data\nAfter, we load our data. In this example, we use the Ames Housing Dataset training split retreived from Kaggle, originally complied by Dean De Cock. In this case, we name our training set housing\nStep3: Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise\nHere we cover the focus of this example. We call the smoter function from this package (smogn.smoter) and satisfy a typtical number of arguments\nStep4: Note\nStep5: Further examining the results, we can see that the distribution of the response variable has changed. By calling the box_plot_stats function from this package (smogn.box_plot_stats) we quickly verify.\nNotice that the modified training set's box plot five number summary has changed, where the distribution of the response variable has skewed right when compared to the original training set.\nStep6: Plotting the results of both the original and modified training sets, the skewed right distribution of the response variable in the modified training set becomes more evident.\nIn this example, SMOGN over-sampled observations whose 'SalePrice' was found to be extremely high according to the box plot (those considered \"minority\") and under-sampled observations that were closer to the median (those considered \"majority\").\nThis is perhaps most useful when most of the y values of interest in predicting are found at the extremes of the distribution within a given dataset."},"code_prompt":{"kind":"string","value":"Python Code:\n## suppress install output\n%%capture\n## install pypi release\n# !pip install smogn\n## install developer version\n!pip install git+https://github.com/nickkunz/smogn.git\nExplanation: SMOGN (0.1.0): Usage\nExample 2: Intermediate\nInstallation\nFirst, we install SMOGN from the Github repository. Alternatively, we could install from the official PyPI distribution. However, the developer version is utilized here for the latest release.\nEnd of explanation\n## load dependencies\nimport smogn\nimport pandas\nimport seaborn\nExplanation: Dependencies\nNext, we load the required dependencies. Here we import smogn to later apply Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise. In addition, we use pandas for data handling, and seaborn to visualize our results.\nEnd of explanation\n## load data\nhousing = pandas.read_csv(\n ## http://jse.amstat.org/v19n3/decock.pdf\n 'https://raw.githubusercontent.com/nickkunz/smogn/master/data/housing.csv'\n)\nExplanation: Data\nAfter, we load our data. In this example, we use the Ames Housing Dataset training split retreived from Kaggle, originally complied by Dean De Cock. In this case, we name our training set housing\nEnd of explanation\n## conduct smogn\nhousing_smogn = smogn.smoter(\n \n ## main arguments\n data = housing, ## pandas dataframe\n y = 'SalePrice', ## string ('header name')\n k = 9, ## positive integer (k < n)\n samp_method = 'extreme', ## string ('balance' or 'extreme')\n ## phi relevance arguments\n rel_thres = 0.80, ## positive real number (0 < R < 1)\n rel_method = 'auto', ## string ('auto' or 'manual')\n rel_xtrm_type = 'high', ## string ('low' or 'both' or 'high')\n rel_coef = 2.25 ## positive real number (0 < R)\n)\nExplanation: Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise\nHere we cover the focus of this example. We call the smoter function from this package (smogn.smoter) and satisfy a typtical number of arguments: data, y, k, samp_method, rel_thres, rel_method, rel_xtrm_type, rel_coef\nThe data argument takes a Pandas DataFrame, which contains the training set split. In this example, we input the previously loaded housing training set with follow input: data = housing\nThe y argument takes a string, which specifies a continuous reponse variable by header name. In this example, we input 'SalePrice' in the interest of predicting the sale price of homes in Ames, Iowa with the following input: y = 'SalePrice' \nThe k argument takes a positive integer less than 𝑛, where 𝑛 is the sample size. k specifies the number of neighbors to consider for interpolation used in over-sampling. In this example, we input 9 to consider 4 additional neighbors (default is 5) with the following input: k = 9\nThe samp_method argument takes a string, either 'balance' or 'extreme'. If 'balance' is specified, less over/under-sampling is conducted. If 'extreme' is specified, more over/under-sampling is conducted. In this case, we input 'extreme' (default is 'balance') to aggressively over/under-sample with the following input: samp_method = 'extreme'\nThe rel_thres argument takes a real number between 0 and 1. It specifies the threshold of rarity. The higher the threshold, the higher the over/under-sampling boundary. The inverse is also true, where the lower the threshold, the lower the over/under-sampling boundary. In this example, we increase the boundary to 0.80 (default is 0.50) with the following input: rel_thres = 0.80\nThe rel_method argument takes a string, either 'auto' or 'manual'. It specifies how relevant or rare \"minority\" values in y are determined. If 'auto' is specified, \"minority\" values are automatically determined by box plot extremes. If 'manual' is specified, \"minority\" values are determined by the user. In this example, we input 'auto' with the following input: rel_method = 'auto'\nThe rel_xtrm_type argument takes a string, either 'low' or 'both' or 'high'. It indicates which region of the response variable y should be considered rare or a \"minority\", when rel_method = 'auto'. In this example, we input 'high' (default is 'both') in the interest of over-sampling high \"minority\" values in y with the following input: rel_xtrm_type = 'high'\nThe rel_coef argument takes a positive real number. It specifies the box plot coefficient used to automatically determine extreme and therefore rare \"minority\" values in y, when rel_method = 'auto'. In this example, we input 2.25 (default is 1.50) to increase the box plot extremes with the following input: rel_coef = 2.25\nEnd of explanation\n## dimensions - original data \nhousing.shape\n## dimensions - modified data\nhousing_smogn.shape\nExplanation: Note:\nIn this example, the regions of interest within the response variable y are automatically determined by box plot extremes. The extreme values are considered rare \"minorty\" values are over-sampled. The values closer the median are considered \"majority\" values and are under-sampled.\nIf there are no box plot extremes contained in the reponse variable y, the argument rel_method = manual must be specified, and an input matrix must be placed into the argument rel_ctrl_pts_rg indicating the regions of rarity in y.\nMore information regarding the matrix input to the rel_ctrl_pts_rg argument and manual over-sampling can be found within the function's doc string, as well as in Example 3: Advanced.\nIt is also important to mention that by default, smogn.smoter will first automatically remove columns containing missing values and then remove rows, as it cannot input data containing missing values. This feature can be changed with the boolean arguments drop_na_col = False and drop_na_rows = False.\nResults\nAfter conducting Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise, we briefly examine the results. \nWe can see that the number of observations (rows) in the original training set increased from 1460 to 2643, while the number of features (columns) decreased from 81 to 62. \nRecall that smogn.smoter automatically removes features containing missing values. In this case, 19 features contained missing values and were therefore omitted.\nThe increase in observations were a result of over-sampling. More detailed information in this regard can be found in the original paper cited in the References section.\nEnd of explanation\n## box plot stats - original data \nsmogn.box_plot_stats(housing['SalePrice'])['stats']\n## box plot stats - modified data\nsmogn.box_plot_stats(housing_smogn['SalePrice'])['stats']\nExplanation: Further examining the results, we can see that the distribution of the response variable has changed. By calling the box_plot_stats function from this package (smogn.box_plot_stats) we quickly verify.\nNotice that the modified training set's box plot five number summary has changed, where the distribution of the response variable has skewed right when compared to the original training set.\nEnd of explanation\n## plot y distribution \nseaborn.kdeplot(housing['SalePrice'], label = \"Original\")\nseaborn.kdeplot(housing_smogn['SalePrice'], label = \"Modified\")\nExplanation: Plotting the results of both the original and modified training sets, the skewed right distribution of the response variable in the modified training set becomes more evident.\nIn this example, SMOGN over-sampled observations whose 'SalePrice' was found to be extremely high according to the box plot (those considered \"minority\") and under-sampled observations that were closer to the median (those considered \"majority\").\nThis is perhaps most useful when most of the y values of interest in predicting are found at the extremes of the distribution within a given dataset.\nEnd of explanation"}}},{"rowIdx":2116,"cells":{"Unnamed: 0":{"kind":"number","value":2116,"string":"2,116"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n DiscreteDP Example\nStep1: Setup\nStep2: Continuous-state benchmark\nLet us compute the value function of the continuous-state version\nas described in equations (2.22) and (2.23) in Section 2.3.\nStep3: The optimal stopping boundary $\\gamma$ for the contiuous-state version, given by (2.23)\nStep4: The value function for the continuous-state version, given by (2.24)\nStep5: Solving the problem with DiscreteDP\nConstruct a DiscreteDP instance for the disrete-state version\nStep6: Let us solve the decision problem by\n(0) value iteration,\n(1) value iteration with span-based termination\n(equivalent to modified policy iteration with step $k = 0$),\n(2) policy iteration,\n(3) modified policy iteration.\nFollowing Rust (1996), we set\nStep7: The numbers of iterations\nStep8: Policy iteration gives the optimal policy\nStep9: Takes action 1 (\"replace\") if and only if $s \\geq \\bar{\\gamma}$, where $\\bar{\\gamma}$ is equal to\nStep10: Check that the other methods gave the correct answer\nStep11: The deviations of the returned value function from the continuous-state benchmark\nStep12: In the following we try to reproduce Table 14.1 in Rust (1996), p.660,\nalthough the precise definitions and procedures there are not very clear.\nThe maximum absolute differences of $v$ from that by policy iteration\nStep13: Compute $\\lVert v - T(v)\\rVert$\nStep14: Next we compute $\\overline{b} - \\underline{b}$\nfor the three methods other than policy iteration, where\n$I$ is the number of iterations required to fulfill the termination condition, and\n$$\n\\begin{aligned}\n\\underline{b} &= \\frac{\\beta}{1-\\beta} \\min\\left[T(v^{I-1}) - v^{I-1}\\right], \\\\\n\\overline{b} &= \\frac{\\beta}{1-\\beta} \\max\\left[T(v^{I-1}) - v^{I-1}\\right].\n\\end{aligned}\n$$\nStep15: For policy iteration, while it does not seem really relevant,\nwe compute $\\overline{b} - \\underline{b}$ with the returned value of $v$\nin place of $v^{I-1}$\nStep16: Last, time each algorithm\nStep17: Notes\nIt appears that our value iteration with span-based termination is different in some details\nfrom the corresponding algorithm (successive approximation with error bounds) in Rust.\nIn returing the value function, our algorithm returns\n$T(v^{I-1}) + (\\overline{b} + \\underline{b})/2$,\nwhile Rust's seems to return $v^{I-1} + (\\overline{b} + \\underline{b})/2$.\nIn fact\nStep18: $\\lVert v - v_{\\mathrm{pi}}\\rVert$\nStep19: $\\lVert v - T(v)\\rVert$\nStep20: Compare the Table in Rust.\nConvergence of trajectories\nLet us plot the convergence of $v^i$ for the four algorithms;\nsee also Figure 14.2 in Rust.\nValue iteration\nStep21: Value iteration with span-based termination\nStep22: Policy iteration\nStep23: Modified policy iteration\nStep24: Increasing the discount factor\nLet us consider the case with a discount factor closer to $1$, $\\beta = 0.9999$.\nStep25: The numbers of iterations\nStep26: Policy iteration gives the optimal policy\nStep27: Takes action 1 (\"replace\") if and only if $s \\geq \\bar{\\gamma}$, where $\\bar{\\gamma}$ is equal to\nStep28: Check that the other methods gave the correct answer\nStep29: $\\lVert v - v_{\\mathrm{pi}}\\rVert$\nStep30: $\\lVert v - T(v)\\rVert$\nStep31: $\\overline{b} - \\underline{b}$\nStep32: For policy iteration"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nimport itertools\nimport scipy.optimize\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom quantecon.markov import DiscreteDP\n# matplotlib settings\nplt.rcParams['axes.autolimit_mode'] = 'round_numbers'\nplt.rcParams['axes.xmargin'] = 0\nplt.rcParams['axes.ymargin'] = 0\nplt.rcParams['patch.force_edgecolor'] = True\nfrom cycler import cycler\nplt.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')\nExplanation: DiscreteDP Example: Automobile Replacement\nDaisuke Oyama\nFaculty of Economics, University of Tokyo\nWe study the finite-state version of the automobile replacement problem as considered in\nRust (1996, Section 4.2.2).\nJ. Rust, \"Numerical Dynamic Programming in Economics\",\n Handbook of Computational Economics, Volume 1, 619-729, 1996.\nEnd of explanation\nlambd = 0.5 # Exponential distribution parameter\nc = 200 # (Constant) marginal cost of maintainance\nnet_price = 10**5 # Replacement cost\nn = 100 # Number of states; s = 0, ..., n-1: level of utilization of the asset\nm = 2 # Number of actions; 0: keep, 1: replace\n# Reward array\nR = np.empty((n, m))\nR[:, 0] = -c * np.arange(n) # Costs for maintainance\nR[:, 1] = -net_price - c * 0 # Costs for replacement\n# Transition probability array\n# For each state s, s' distributes over\n# s, s+1, ..., min{s+supp_size-1, n-1} if a = 0\n# 0, 1, ..., supp_size-1 if a = 1\n# according to the (discretized and truncated) exponential distribution\n# with parameter lambd\nsupp_size = 12\nprobs = np.empty(supp_size)\nprobs[0] = 1 - np.exp(-lambd * 0.5)\nfor j in range(1, supp_size-1):\n probs[j] = np.exp(-lambd * (j - 0.5)) - np.exp(-lambd * (j + 0.5))\nprobs[supp_size-1] = 1 - np.sum(probs[:-1])\nQ = np.zeros((n, m, n))\n# a = 0\nfor i in range(n-supp_size):\n Q[i, 0, i:i+supp_size] = probs\nfor k in range(supp_size):\n Q[n-supp_size+k, 0, n-supp_size+k:] = probs[:supp_size-k]/probs[:supp_size-k].sum()\n# a = 1\nfor i in range(n):\n Q[i, 1, :supp_size] = probs\n# Discount factor\nbeta = 0.95\nExplanation: Setup\nEnd of explanation\ndef f(x, s):\n return (c/(1-beta)) * \\\n ((x-s) - (beta/(lambd*(1-beta))) * (1 - np.exp(-lambd*(1-beta)*(x-s))))\nExplanation: Continuous-state benchmark\nLet us compute the value function of the continuous-state version\nas described in equations (2.22) and (2.23) in Section 2.3.\nEnd of explanation\ngamma = scipy.optimize.brentq(lambda x: f(x, 0) - net_price, 0, 100)\nprint(gamma)\nExplanation: The optimal stopping boundary $\\gamma$ for the contiuous-state version, given by (2.23):\nEnd of explanation\ndef value_func_cont_time(s):\n return -c*gamma/(1-beta) + (s < gamma) * f(gamma, s)\nv_cont = value_func_cont_time(np.arange(n))\nExplanation: The value function for the continuous-state version, given by (2.24):\nEnd of explanation\nddp = DiscreteDP(R, Q, beta)\nExplanation: Solving the problem with DiscreteDP\nConstruct a DiscreteDP instance for the disrete-state version:\nEnd of explanation\nv_init = np.zeros(ddp.num_states)\nepsilon = 1164\nmethods = ['vi', 'mpi', 'pi', 'mpi']\nlabels = ['Value iteration', 'Value iteration with span-based termination',\n 'Policy iteration', 'Modified policy iteration']\nresults = {}\nfor i in range(4):\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n results[labels[i]] = \\\n ddp.solve(method=methods[i], v_init=v_init, epsilon=epsilon, k=k)\ncolumns = [\n 'Iterations', 'Time (second)', r'$\\lVert v - v_{\\mathrm{pi}} \\rVert$',\n r'$\\overline{b} - \\underline{b}$', r'$\\lVert v - T(v)\\rVert$'\n]\ndf = pd.DataFrame(index=labels, columns=columns)\nExplanation: Let us solve the decision problem by\n(0) value iteration,\n(1) value iteration with span-based termination\n(equivalent to modified policy iteration with step $k = 0$),\n(2) policy iteration,\n(3) modified policy iteration.\nFollowing Rust (1996), we set:\n$\\varepsilon = 1164$ (for value iteration and modified policy iteration),\n$v^0 \\equiv 0$,\nthe number of iteration for iterative policy evaluation $k = 20$.\nEnd of explanation\nfor label in labels:\n print(results[label].num_iter, '\\t' + '(' + label + ')')\n df[columns[0]].loc[label] = results[label].num_iter\nExplanation: The numbers of iterations:\nEnd of explanation\nprint(results['Policy iteration'].sigma)\nExplanation: Policy iteration gives the optimal policy:\nEnd of explanation\n(1-results['Policy iteration'].sigma).sum()\nExplanation: Takes action 1 (\"replace\") if and only if $s \\geq \\bar{\\gamma}$, where $\\bar{\\gamma}$ is equal to:\nEnd of explanation\nfor result in results.values():\n if result != results['Policy iteration']:\n print(np.array_equal(result.sigma, results['Policy iteration'].sigma))\nExplanation: Check that the other methods gave the correct answer:\nEnd of explanation\ndiffs_cont = {}\nfor label in labels:\n diffs_cont[label] = np.abs(results[label].v - v_cont).max()\n print(diffs_cont[label], '\\t' + '(' + label + ')')\nlabel = 'Policy iteration'\nfig, ax = plt.subplots(figsize=(8,5))\nax.plot(-v_cont, label='Continuous-state')\nax.plot(-results[label].v, label=label)\nax.set_title('Comparison of discrete vs. continuous value functions')\nax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nax.set_xlabel('State')\nax.set_ylabel(r'Value $\\times\\ (-1)$')\nplt.legend(loc=4)\nplt.show()\nExplanation: The deviations of the returned value function from the continuous-state benchmark:\nEnd of explanation\nfor label in labels:\n diff_pi = \\\n np.abs(results[label].v - results['Policy iteration'].v).max()\n print(diff_pi, '\\t' + '(' + label + ')')\n df[columns[2]].loc[label] = diff_pi\nExplanation: In the following we try to reproduce Table 14.1 in Rust (1996), p.660,\nalthough the precise definitions and procedures there are not very clear.\nThe maximum absolute differences of $v$ from that by policy iteration:\nEnd of explanation\nfor label in labels:\n v = results[label].v\n diff_max = \\\n np.abs(v - ddp.bellman_operator(v)).max()\n print(diff_max, '\\t' + '(' + label + ')')\n df[columns[4]].loc[label] = diff_max\nExplanation: Compute $\\lVert v - T(v)\\rVert$:\nEnd of explanation\nfor i in range(4):\n if labels[i] != 'Policy iteration':\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n res = ddp.solve(method=methods[i], v_init=v_init, k=k,\n max_iter=results[labels[i]].num_iter-1)\n diff = ddp.bellman_operator(res.v) - res.v\n diff_span = (diff.max() - diff.min()) * ddp.beta / (1 - ddp.beta)\n print(diff_span, '\\t' + '(' + labels[i] + ')')\n df[columns[3]].loc[labels[i]] = diff_span\nExplanation: Next we compute $\\overline{b} - \\underline{b}$\nfor the three methods other than policy iteration, where\n$I$ is the number of iterations required to fulfill the termination condition, and\n$$\n\\begin{aligned}\n\\underline{b} &= \\frac{\\beta}{1-\\beta} \\min\\left[T(v^{I-1}) - v^{I-1}\\right], \\\\\n\\overline{b} &= \\frac{\\beta}{1-\\beta} \\max\\left[T(v^{I-1}) - v^{I-1}\\right].\n\\end{aligned}\n$$\nEnd of explanation\nlabel = 'Policy iteration'\nv = results[label].v\ndiff = ddp.bellman_operator(v) - v\ndiff_span = (diff.max() - diff.min()) * ddp.beta / (1 - ddp.beta)\nprint(diff_span, '\\t' + '(' + label + ')')\ndf[columns[3]].loc[label] = diff_span\nExplanation: For policy iteration, while it does not seem really relevant,\nwe compute $\\overline{b} - \\underline{b}$ with the returned value of $v$\nin place of $v^{I-1}$:\nEnd of explanation\nfor i in range(4):\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n print(labels[i])\n t = %timeit -o ddp.solve(method=methods[i], v_init=v_init, epsilon=epsilon, k=k)\n df[columns[1]].loc[labels[i]] = t.best\ndf\nExplanation: Last, time each algorithm:\nEnd of explanation\ni = 1\nk = 0\nres = ddp.solve(method=methods[i], v_init=v_init, k=k,\n max_iter=results[labels[i]].num_iter-1)\ndiff = ddp.bellman_operator(res.v) - res.v\nv = res.v + (diff.max() + diff.min()) * ddp.beta / (1 - ddp.beta) / 2\nExplanation: Notes\nIt appears that our value iteration with span-based termination is different in some details\nfrom the corresponding algorithm (successive approximation with error bounds) in Rust.\nIn returing the value function, our algorithm returns\n$T(v^{I-1}) + (\\overline{b} + \\underline{b})/2$,\nwhile Rust's seems to return $v^{I-1} + (\\overline{b} + \\underline{b})/2$.\nIn fact:\nEnd of explanation\nnp.abs(v - results['Policy iteration'].v).max()\nExplanation: $\\lVert v - v_{\\mathrm{pi}}\\rVert$:\nEnd of explanation\nnp.abs(v - ddp.bellman_operator(v)).max()\nExplanation: $\\lVert v - T(v)\\rVert$:\nEnd of explanation\nlabel = 'Value iteration'\niters = [2, 20, 40, 80]\nv = np.zeros(ddp.num_states)\nfig, ax = plt.subplots(figsize=(8,5))\nfor i in range(iters[-1]):\n v = ddp.bellman_operator(v)\n if i+1 in iters:\n ax.plot(-v, label='Iteration {0}'.format(i+1))\nax.plot(-results['Policy iteration'].v, label='Fixed Point')\nax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nax.set_ylim(0, 2.4e5)\nax.set_yticks([0.4e5 * i for i in range(7)])\nax.set_title(label)\nax.set_xlabel('State')\nax.set_ylabel(r'Value $\\times\\ (-1)$')\nplt.legend(loc=(0.7, 0.2))\nplt.show()\nExplanation: Compare the Table in Rust.\nConvergence of trajectories\nLet us plot the convergence of $v^i$ for the four algorithms;\nsee also Figure 14.2 in Rust.\nValue iteration\nEnd of explanation\nlabel = 'Value iteration with span-based termination'\niters = [1, 10, 15, 20]\nv = np.zeros(ddp.num_states)\nfig, ax = plt.subplots(figsize=(8,5))\nfor i in range(iters[-1]):\n u = ddp.bellman_operator(v)\n if i+1 in iters:\n diff = u - v\n w = u + ((diff.max() + diff.min()) / 2) * ddp.beta / (1 - ddp.beta)\n ax.plot(-w, label='Iteration {0}'.format(i+1))\n v = u\nax.plot(-results['Policy iteration'].v, label='Fixed Point')\nax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nax.set_ylim(1.0e5, 2.4e5)\nax.set_yticks([1.0e5+0.2e5 * i for i in range(8)])\nax.set_title(label)\nax.set_xlabel('State')\nax.set_ylabel(r'Value $\\times\\ (-1)$')\nplt.legend(loc=(0.7, 0.2))\nplt.show()\nExplanation: Value iteration with span-based termination\nEnd of explanation\nlabel = 'Policy iteration'\niters = [1, 2, 3]\nv_init = np.zeros(ddp.num_states)\nfig, ax = plt.subplots(figsize=(8,5))\nsigma = ddp.compute_greedy(v_init)\nfor i in range(iters[-1]):\n # Policy evaluation\n v_sigma = ddp.evaluate_policy(sigma)\n if i+1 in iters:\n ax.plot(-v_sigma, label='Iteration {0}'.format(i+1))\n # Policy improvement\n new_sigma = ddp.compute_greedy(v_sigma)\n sigma = new_sigma\nax.plot(-results['Policy iteration'].v, label='Fixed Point')\nax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nax.set_ylim(1e5, 4.2e5)\nax.set_yticks([1e5 + 0.4e5 * i for i in range(9)])\nax.set_title(label)\nax.set_xlabel('State')\nax.set_ylabel(r'Value $\\times\\ (-1)$')\nplt.legend(loc=4)\nplt.show()\nExplanation: Policy iteration\nEnd of explanation\nlabel = 'Modified policy iteration'\niters = [1, 2, 3, 4]\nv = np.zeros(ddp.num_states)\nk = 20 #- 1\nfig, ax = plt.subplots(figsize=(8,5))\nfor i in range(iters[-1]):\n # Policy improvement\n sigma = ddp.compute_greedy(v)\n u = ddp.bellman_operator(v)\n if i == results[label].num_iter-1:\n diff = u - v\n break\n # Partial policy evaluation with k=20 iterations\n for j in range(k):\n u = ddp.T_sigma(sigma)(u)\n v = u\n if i+1 in iters:\n ax.plot(-v, label='Iteration {0}'.format(i+1))\nax.plot(-results['Policy iteration'].v, label='Fixed Point')\nax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nax.set_ylim(0, 2.8e5)\nax.set_yticks([0.4e5 * i for i in range(8)])\nax.set_title(label)\nax.set_xlabel('State')\nax.set_ylabel(r'Value $\\times\\ (-1)$')\nplt.legend(loc=4)\nplt.show()\nExplanation: Modified policy iteration\nEnd of explanation\nddp.beta = 0.9999\nv_init = np.zeros(ddp.num_states)\nepsilon = 1164\nddp.max_iter = 10**5 * 2\nresults_9999 = {}\nfor i in range(4):\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n results_9999[labels[i]] = \\\n ddp.solve(method=methods[i], v_init=v_init, epsilon=epsilon, k=k)\ndf_9999 = pd.DataFrame(index=labels, columns=columns)\nExplanation: Increasing the discount factor\nLet us consider the case with a discount factor closer to $1$, $\\beta = 0.9999$.\nEnd of explanation\nfor label in labels:\n print(results_9999[label].num_iter, '\\t' + '(' + label + ')')\n df_9999[columns[0]].loc[label] = results_9999[label].num_iter\nExplanation: The numbers of iterations:\nEnd of explanation\nprint(results_9999['Policy iteration'].sigma)\nExplanation: Policy iteration gives the optimal policy:\nEnd of explanation\n(1-results_9999['Policy iteration'].sigma).sum()\nExplanation: Takes action 1 (\"replace\") if and only if $s \\geq \\bar{\\gamma}$, where $\\bar{\\gamma}$ is equal to:\nEnd of explanation\nfor result in results_9999.values():\n if result != results_9999['Policy iteration']:\n print(np.array_equal(result.sigma, results_9999['Policy iteration'].sigma))\nExplanation: Check that the other methods gave the correct answer:\nEnd of explanation\nfor label in labels:\n diff_pi = \\\n np.abs(results_9999[label].v - results_9999['Policy iteration'].v).max()\n print(diff_pi, '\\t' + '(' + label + ')')\n df_9999[columns[2]].loc[label] = diff_pi\nExplanation: $\\lVert v - v_{\\mathrm{pi}}\\rVert$:\nEnd of explanation\nfor label in labels:\n v = results_9999[label].v\n diff_max = \\\n np.abs(v - ddp.bellman_operator(v)).max()\n print(diff_max, '\\t' + '(' + label + ')')\n df_9999[columns[4]].loc[label] = diff_max\nExplanation: $\\lVert v - T(v)\\rVert$:\nEnd of explanation\nfor i in range(4):\n if labels[i] != 'Policy iteration':\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n res = ddp.solve(method=methods[i], v_init=v_init, k=k,\n max_iter=results_9999[labels[i]].num_iter-1)\n diff = ddp.bellman_operator(res.v) - res.v\n diff_span = (diff.max() - diff.min()) * ddp.beta / (1 - ddp.beta)\n print(diff_span, '\\t' + '(' + labels[i] + ')')\n df_9999[columns[3]].loc[labels[i]] = diff_span\nExplanation: $\\overline{b} - \\underline{b}$:\nEnd of explanation\nlabel = 'Policy iteration'\nv = results_9999[label].v\ndiff = ddp.bellman_operator(v) - v\ndiff_span = (diff.max() - diff.min()) * ddp.beta / (1 - ddp.beta)\nprint(diff_span, '\\t' + '(' + label + ')')\ndf_9999[columns[3]].loc[label] = diff_span\nfor i in range(4):\n k = 20 if labels[i] == 'Modified policy iteration' else 0\n print(labels[i])\n t = %timeit -o ddp.solve(method=methods[i], v_init=v_init, epsilon=epsilon, k=k)\n df_9999[columns[1]].loc[labels[i]] = t.best\ndf_9999\ndf_time = pd.DataFrame(index=labels)\ndf_time[r'$\\beta = 0.95$'] = df[columns[1]]\ndf_time[r'$\\beta = 0.9999$'] = df_9999[columns[1]]\nsecond_max = df_time[r'$\\beta = 0.9999$'][1:].max()\nfor xlim in [None, (0, second_max*1.2)]:\n ax = df_time.loc[reversed(labels)][df_time.columns[::-1]].plot(\n kind='barh', legend='reverse', xlim=xlim, figsize=(8,5)\n )\n ax.set_xlabel('Time (second)')\nimport platform\nprint(platform.platform())\nimport sys\nprint(sys.version)\nprint(np.__version__)\nExplanation: For policy iteration:\nEnd of explanation"}}},{"rowIdx":2117,"cells":{"Unnamed: 0":{"kind":"number","value":2117,"string":"2,117"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Lecture 20\nStep1: L is the LENGTH of our box. You can set this to any value you choose however, appropriate scaling of the problem would admit 1 as the length of choice.\nnx is the number of grid points we wish to have in our solution. If we have fewer we sacrifice accuracy, if we have more, the computational time increases. You should always check that your solution does not depend on the number of grid points and the grid spacing!\ndx is the spacing between grid points. Similar comments as above in #2\ntimeStepDuration is the amount of 'time' at each step of the solution. Accuracy and stability of the solution depend on choices for the timeStepDuration and the grid point spacing and the diffusion coefficient. We will not discuss stability any further, just know that it is something that needs to be considered.\nsteps is the number of timesteps in dt you wish to run. You can change this value to observe the solution in different stages.\nInitializing the Simulation Domain and Parameters\nStep2: Here we set the diffusion coefficient.\nStep3: Note\nStep4: We assign to 'c' objects that are 'CellVariables'. This is a special type of variable used by FiPy to hold the values for the concentration in our solution. We also create a viewer here so that we can inspect the values of 'c'.\nStep5: Note\nStep6: Setting Initial Conditions\nStep7: Note\nStep8: This line defines the diffusion equation\nStep9: These lines print out a text file with the final data\nStep10: Concentration\nDependent Diffusion\nStandard Imports\nStep11: The parameters of our system.\nL is the LENGTH of our box. You can set this to any value you choose\nhowever, 1 is the easiest.\nnx is the number of grid points we wish to have in our solution. If we have fewer we sacrifice accuracy, if we have more, the computational time increases and we are subject to roundoff error. You should always check that your solution does not depend on the number of grid points and the grid spacing!\ndx is the spacing between grid points. Similar comments as above.\ntimeStepDuration is the amount of 'time' at each step of the solution.\nAccuracy and stability of the solution depend on choices for the timeStepDuration and the grid point spacing and the diffusion coefficient. We will not discuss stability any further, just know that it is something that needs to be considered.\nStep12: Note\nStep13: Note\nStep14: We assign to 'c' objects that are 'CellVariables'. This is a special type of variable used by FiPy to hold the values for the concentration in our solution. c1 for eqn1 and c2 for eqn2\nStep15: Note\nStep16: These lines set diffusant in the initial condition. They are set to the same values for easy comparison. Feel free to change these values.\nStep17: Boundary conditions can be either fixed flux or fixed value. Here, fixed value is used for simple comparison between the diffusion profiles.\nStep18: Note\nStep19: Note\nStep20: The following is an if loop that waits for user input before executing. Iterates for number of steps stated earlier for each equation."},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib osx\nfrom fipy import *\n%matplotlib\nfrom fipy import *\nExplanation: Lecture 20: Introduction to FiPy - Getting to Know the Diffusion Equation\nObjectives:\nUnderstand how to create the diffusion equation in FiPy.\nBe able to change variables in the equation and observe the effects in the diffusion equation solution.\nUnderstand how to save the results to a data file.\nFirst thing we'll do is use the qt backend for interacting with matplotlib. There is documentation in matplotlib about backends. One or more of the available backends may be installed with your python distribution.\nSetting up Plotting\nEnd of explanation\nL = 1.\nnx = 200\ndx = L / nx\ntimeStepDuration = 0.001\nsteps = 100\nExplanation: L is the LENGTH of our box. You can set this to any value you choose however, appropriate scaling of the problem would admit 1 as the length of choice.\nnx is the number of grid points we wish to have in our solution. If we have fewer we sacrifice accuracy, if we have more, the computational time increases. You should always check that your solution does not depend on the number of grid points and the grid spacing!\ndx is the spacing between grid points. Similar comments as above in #2\ntimeStepDuration is the amount of 'time' at each step of the solution. Accuracy and stability of the solution depend on choices for the timeStepDuration and the grid point spacing and the diffusion coefficient. We will not discuss stability any further, just know that it is something that needs to be considered.\nsteps is the number of timesteps in dt you wish to run. You can change this value to observe the solution in different stages.\nInitializing the Simulation Domain and Parameters\nEnd of explanation\nD11 = 0.5 \nExplanation: Here we set the diffusion coefficient.\nEnd of explanation\nmesh = Grid1D(dx = dx, nx = nx)\nExplanation: Note: The 'mesh' command creates the mesh (gridpoints) on which we will solve the equation. This is specific to FiPy.\nAt this point, if you are in the IPython notebook I would suggest you try the following in the cell below:\nPut your cursor to the right of the \"(\" and hit TAB. You will get the docstring for the Grid1D function.\nDo this again with your cursor to the right of the \"d\" in Grid1D().\nAnd again after the \"G\".\nThis is a powerful way to explore the available functions.\nEnd of explanation\nc = CellVariable(name = \"c\", mesh = mesh)\nviewer = MatplotlibViewer(vars=(c,),datamin=-0.1, datamax=1.1, legend=None)\nExplanation: We assign to 'c' objects that are 'CellVariables'. This is a special type of variable used by FiPy to hold the values for the concentration in our solution. We also create a viewer here so that we can inspect the values of 'c'.\nEnd of explanation\nx = mesh.cellCenters\nx\nx\nExplanation: Note: This command sets 'x' to contain a list of numbers that define the x position of the grid-points.\nEnd of explanation\nc.setValue(0.0) \nviewer.plot()\nc.setValue(0.2, where=x < L/2.) \nc.setValue(0.8, where=x > L/2.)\nviewer.plot()\nExplanation: Setting Initial Conditions\nEnd of explanation\nboundaryConditions=(FixedValue(mesh.facesLeft,0.2), \n FixedValue(mesh.facesRight,0.8))\nExplanation: Note: Boundary conditions come in two types. Fixed flux and fixed value. The syntax is: \nFixedValue(mesh.getFacesLeft(), VALUE)\nFixedFlux(mesh.getFacesLeft(), FLUX)\nFixed value boundaries, can set VALUE or FLUX as a float\nEnd of explanation\neqn1 = TransientTerm() == ImplicitDiffusionTerm(D11)\nfor step in range(1000): \n eqn1.solve(c, boundaryConditions = boundaryConditions, dt = timeStepDuration)\n viewer.plot() \nExplanation: This line defines the diffusion equation:\nEnd of explanation\nfrom fipy.viewers.tsvViewer import TSVViewer \nTSVViewer(vars=(c)).plot(filename=\"output.txt\") \n!head output.txt\nExplanation: These lines print out a text file with the final data\nEnd of explanation\n%matplotlib osx\nfrom fipy import *\nExplanation: Concentration\nDependent Diffusion\nStandard Imports\nEnd of explanation\nL = 1.\nnx = 50\ndx = L / nx\ntimeStepDuration = 0.001\nsteps = 100\nExplanation: The parameters of our system.\nL is the LENGTH of our box. You can set this to any value you choose\nhowever, 1 is the easiest.\nnx is the number of grid points we wish to have in our solution. If we have fewer we sacrifice accuracy, if we have more, the computational time increases and we are subject to roundoff error. You should always check that your solution does not depend on the number of grid points and the grid spacing!\ndx is the spacing between grid points. Similar comments as above.\ntimeStepDuration is the amount of 'time' at each step of the solution.\nAccuracy and stability of the solution depend on choices for the timeStepDuration and the grid point spacing and the diffusion coefficient. We will not discuss stability any further, just know that it is something that needs to be considered.\nEnd of explanation\nD1 = 3.0\nExplanation: Note: In the first equation, the diffusion coefficient is constant, concentration independent.\nEnd of explanation\nmesh = Grid1D(dx = dx, nx = nx)\nExplanation: Note: You have seen all of the following code before. This time we are solving two simultaneous equations, eqn1 and eqn2.\nNote: The 'mesh' command creates the mesh (gridpoints) on which we will solve the equation. This is specific to FiPy.\nEnd of explanation\nc1 = CellVariable(\n name = \"c1\",\n mesh = mesh,\n hasOld = True)\nc2 = CellVariable(\n name = \"c2\",\n mesh = mesh,\n hasOld = True)\nExplanation: We assign to 'c' objects that are 'CellVariables'. This is a special type of variable used by FiPy to hold the values for the concentration in our solution. c1 for eqn1 and c2 for eqn2\nEnd of explanation\nx = mesh.cellCenters\nExplanation: Note: This command sets 'x' to contain a list of numbers that define the x position of the grid-points.\nEnd of explanation\nc1.setValue(0.8)\nc1.setValue(0.2, where=x > L/3.)\nc2.setValue(0.8)\nc2.setValue(0.2, where=x > L/3.)\nviewer.plot()\nviewer = Matplotlib1DViewer(vars = (c1,c2), limits = {'datamin': 0., 'datamax': 1.})\nviewer.plot()\nExplanation: These lines set diffusant in the initial condition. They are set to the same values for easy comparison. Feel free to change these values.\nEnd of explanation\nboundaryConditions=(FixedValue(mesh.facesLeft,0.8), FixedValue(mesh.facesRight,0.2))\nboundaryConditions=(FixedFlux(mesh.facesLeft,0.0), FixedFlux(mesh.facesRight,0.0))\nExplanation: Boundary conditions can be either fixed flux or fixed value. Here, fixed value is used for simple comparison between the diffusion profiles.\nEnd of explanation\nD22_0 = 3.0\nD22_1 = 0.5\nD2 = (D22_1 - D22_0)*c2 + D22_0\nExplanation: Note: In the second equation, the diffusion coefficient is non-constant and is a function of concentration in the system. So we use D22_0 and D22_1 as the end points of our function. The function, given by \"D\" is simply a linear interpolation of the two D values. You are, of course, free to try other functions.\nEnd of explanation\neqn1 = TransientTerm() == ImplicitDiffusionTerm(D1) \neqn2 = TransientTerm() == ImplicitDiffusionTerm(D2)\nExplanation: Note: These are the two diffusion equations. The first equation is as in previous code, using a concentration independent D1. The second equation uses a non-constant D described above.\nEnd of explanation\nfor step in range(10):\n c1.updateOld()\n c2.updateOld()\n eqn1.solve(c1, boundaryConditions = boundaryConditions, dt = timeStepDuration)\n eqn2.solve(c2, boundaryConditions = boundaryConditions, dt = timeStepDuration)\n viewer.plot()\nExplanation: The following is an if loop that waits for user input before executing. Iterates for number of steps stated earlier for each equation.\nEnd of explanation"}}},{"rowIdx":2118,"cells":{"Unnamed: 0":{"kind":"number","value":2118,"string":"2,118"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Building Histograms with Bayesian Priors\nAn Introduction to Bayesian Blocks\n========\nVersion 0.1\nBy LM Walkowicz 2019 June 14\nThis notebook makes heavy use of Bayesian block implementations by Jeff Scargle, Jake VanderPlas, Jan Florjanczyk, and the Astropy team.\nBefore you begin, please download the dataset for this notebook.\nProblem 1) Histograms Lie!\nOne of the most common and useful tools for data visualization can be incredibly misleading. Let's revisit how.\nProblem 1a\nFirst, let's make some histograms! Below, I provide some data; please make a histogram of it.\nStep1: Hey, nice histogram! \nBut how do we know we have visualized all the relevant structure in our data? \nPlay around with the binning and consider\nStep2: Problem 1b\nWhat are some issues with histograms? \nTake a few min to discuss this with your partner\nSolution 1b\nwrite your solution here\nProblem 1c\nWe have previously covered a few ways to make histograms better. What are some ways you could improve your histogram?\nTake a few min to discuss this with your partner\nProblem 1d\nThere are lots of ways to improve the previous histogram-- let's implement a KDE representation instead! As you have seen in previous sessions, we will borrow a bit of code from Jake VanderPlas to estimate the KDE. \nAs a reminder, you have a number of choices of kernel in your KDE-- some we have used in the past\nStep4: Problem 1d\nWhich parameters most affected the shape of the final distribution?\nWhat are some possible issues with using a KDE representation of the data?\nDiscuss with your partner\nSolution 1d\nWrite your response here\nProblem 2) Histograms Episode IV\nStep5: Problem 2a \nLet's visualize our data again, but this time we will use Bayesian Blocks.\nPlot a standard histogram (as above), but now plot the Bayesian Blocks representation of the distribution over it.\nStep6: Problem 2b\nHow is the Bayesian Blocks representation different or similar? \nHow might your choice of representation affect your scientific conclusions about your data? \nTake a few min to discuss this with your partner\nIf you are using histograms for analysis, you might infer physical meaning from the presence or absence of features in these distributions. As it happens, histograms of time-tagged event data are often used to characterize physical events in time domain astronomy, for example gamma ray bursts or stellar flares. \nProblem 3) Bayesian Blocks in the wild\nNow we'll apply Bayesian Blocks to some real astronomical data, and explore how our visualization choices may affect our scientific conclusions.\nFirst, let's get some data!\nAll data from NASA missions is hosted on the Mikulski Archive for Space Telescopes (aka MAST). As an aside, the M in MAST used to stand for \"Multimission\", but was changed to honor Sen. Barbara Mikulski (D-MD) for her tireless support of science. \nSome MAST data (mostly the original data products) can be directly accessed using astroquery (there's an extensive guide to interacting with MAST via astroquery here\nStep7: Problem 3c\nLet's look at the distribution of the small planet radii in this table, which are given in units of Earth radii. Select the planets whose radii are Neptune-sized or smaller. \nSelect the planet radii for all planets smaller than Neptune in the table, and visualize the distribution of planet radii using a standard histogram.\nStep8: Problem 3d\nWhat features do you see in the histogram of planet radii? Which of these features are important? \nDiscuss with your partner\nSolution 3d\nWrite your answer here\nProblem 3e\nNow let's try visualizing these data using Bayesian Blocks. Please recreate the histogram you plotted above, and then plot the Bayesian Blocks version over it.\nStep9: Problem 3f\nWhat features do you see in the histogram of planet radii? Which of these features are important? \nDiscuss with your partner.\nHint\nStep10: Problem 3g\nPlease repeat the previous problem, but this time use the astropy implementation of Bayesian Blocks.\nStep11: Putting these results in context\nBoth standard histograms and KDEs can be useful for quickly visualizing data, and in some cases, getting an intuition for the underlying PDF of your data.\nHowever, keep in mind that they both involve making parameter choices that are largely not motivated in any quantitative way. These choices can create wildly misleading representations of your data. \nIn particular, your choices may lead you to make a physical interpretation that may or may not be correct (in our example, bear in mind that the observed distribution of exoplanetary radii informs models of planet formation).\nBayesian Blocks is more than just a variable-width histogram\nWhile KDEs often do a better job of visualizing data than standard histograms do, they also create a loss of information. Philosophically speaking, what Bayesian Blocks do is posit that the \"change points\", also known as the bin edges, contain information that is interesting. When you apply a KDE, you are smoothing your data by creating an approximation, and that can mean you are losing potential insights by removing information. \nWhile Bayesian Blocks are useful as a replacement for histograms in general, their ability to identify change points makes them especially useful for time series analysis. \nProblem 4) Bayesian Blocks for Time Series Analysis\nWhile Bayesian Blocks can be very useful as a simple replacement for histograms, one of its great strengths is in finding \"change points\" in time series data. Finding these change points can be useful for discovering interesting events in time series data you already have, and it can be used in real-time to detect changes that might trigger another action (for example, follow up observations for LSST). \nLet's take a look at a few examples of using Bayesian Blocks in the time series context. \nFirst and foremost, it's important to understand the different between various kinds of time series data. \nEvent data come from photon counting instruments. In these data, the time series typically consists of photon arrival times, usually in a particular range of energies that the instrument is sensitive to. Event data are univariate, in that the time series is \"how many photons at a given time\", or \"how many photons in a given chunk of time\". \nPoint measurements are measurements of a (typically) continuous source at a given moment in time, often with some uncertainty associated with the measurement. These data are multivariate, as your time series relates time, your measurement (e.g. flux, magnitude, etc) and its associated uncertainty to one another. \nProblem 4a\nLet's look at some event data from BATSE, a high energy astrophysics experiment that flew on NASA's Compton Gamma-Ray Observatory. BATSE primarily studied gamma ray bursts (GRBs), capturing its detections in four energy channels\nStep12: Problem 4b\nWhen you reach this point, you and your partner should pick a number between 1 and 4; your number is the channel whose data you will work with. \nUsing the data for your channel, please visualize the photon events in both a standard histogram and using Bayesian Blocks.\nStep13: Problem 4c\nLet's take a moment to reflect on the differences between these two representations of our data. \nPlease discuss with your partner\nStep14: Cool, we have loaded in the FITS file. Let's look at what's in it\nStep15: We want the light curve, so let's check out what's in that part of the file!\nStep16: Problem 5b\nUse a scatter plot to visualize the Kepler lightcurve. I strongly suggest you try displaying it at different scales by zooming in (or playing with the axis limits), so that you can get a better sense of the shape of the lightcurve.\nStep17: Problem 5c\nThese data consist of a variable background, with occasional bright points caused by stellar flares.\nBrainstorm possible approaches to find the flare events in this data. Write down your ideas, and discuss their potential advantages, disadvantages, and any pitfalls that you think might arise. \nDiscuss with your partner\nSolution 5c\nWrite your notes here\nThere are lots of possible ways to approach this problem. In the literature, a very common traditional approach has been to fit the background variability while ignoring the outliers, then to subtract the background fit, and flag any point beyond some threshold value as belonging to a flare. More sophisticated approaches also exist, but they are often quite time consuming (and in many cases, detailed fits require a good starting estimate for the locations of flare events). \nRecall, however, that Bayesian Blocks is particularly effective at identifying change points in our data. Let's see if it can help us in this case!\nProblem 5d\nUse Bayesian Blocks to visualize the Kepler lightcurve. Note that you are now using data that consists of point measurements, rather than event data (as for the BATSE example).\nStep18: Concluding Remarks\nAs you can see, using Bayesian Blocks allowed us to represent the data, including both quiescent variability and flares, without having to smooth, clip, or otherwise alter the data. \nOne potential drawback here is that the change points don't identify the flares themselves, or at least don't identify them as being different from the background variability-- the algorithm identifies change points, but does not know anything about what change points might be interesting to you in particular. \nAnother potential drawback is that it is possible that the Bayesian Block representation may not catch all events, or at least may not, on its own, provide an unambiguous sign that a subtle signal of interest is in the data.\nIn these cases, it is sometimes instructive to use a hybrid approach, where one combines the bins determined from a traditional histogram with the Bayesian Blocks change points. Alternatively, if one has a good model for the background, one can compared the blocks representation of a background-only simulated data set with one containing both background and model signal. \nFurther interesting examples (in the area of high energy physics and astrophysics) are provided by this paper"},"code_prompt":{"kind":"string","value":"Python Code:\n# execute this cell\nnp.random.seed(0)\nx = np.concatenate([stats.cauchy(-5, 1.8).rvs(500),\n stats.cauchy(-4, 0.8).rvs(2000),\n stats.cauchy(-1, 0.3).rvs(500),\n stats.cauchy(2, 0.8).rvs(1000),\n stats.cauchy(4, 1.5).rvs(500)])\n# truncate values to a reasonable range\nx = x[(x > -15) & (x < 15)]\n# complete\n# plt.hist( \nExplanation: Building Histograms with Bayesian Priors\nAn Introduction to Bayesian Blocks\n========\nVersion 0.1\nBy LM Walkowicz 2019 June 14\nThis notebook makes heavy use of Bayesian block implementations by Jeff Scargle, Jake VanderPlas, Jan Florjanczyk, and the Astropy team.\nBefore you begin, please download the dataset for this notebook.\nProblem 1) Histograms Lie!\nOne of the most common and useful tools for data visualization can be incredibly misleading. Let's revisit how.\nProblem 1a\nFirst, let's make some histograms! Below, I provide some data; please make a histogram of it.\nEnd of explanation\n# complete\n# plt.hist \nExplanation: Hey, nice histogram! \nBut how do we know we have visualized all the relevant structure in our data? \nPlay around with the binning and consider: \nWhat features do you see in this data? Which of these features are important?\nEnd of explanation\n# execute this cell\nfrom sklearn.neighbors import KernelDensity\ndef kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):\n kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)\n kde_skl.fit(data[:, np.newaxis])\n log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)\n return np.exp(log_pdf)\n# complete\n# plt.hist( \n# grid = \n# PDF = \n# plt.plot(\nExplanation: Problem 1b\nWhat are some issues with histograms? \nTake a few min to discuss this with your partner\nSolution 1b\nwrite your solution here\nProblem 1c\nWe have previously covered a few ways to make histograms better. What are some ways you could improve your histogram?\nTake a few min to discuss this with your partner\nProblem 1d\nThere are lots of ways to improve the previous histogram-- let's implement a KDE representation instead! As you have seen in previous sessions, we will borrow a bit of code from Jake VanderPlas to estimate the KDE. \nAs a reminder, you have a number of choices of kernel in your KDE-- some we have used in the past: tophat, Epanechnikov, Gaussian. Please plot your original histogram, and then overplot a few example KDEs on top of it.\nEnd of explanation\n# execute this cell\ndef bayesian_blocks(t):\n Bayesian Blocks Implementation\n By Jake Vanderplas. License: BSD\n Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S\n Parameters\n ----------\n t : ndarray, length N\n data to be histogrammed\n Returns\n -------\n bins : ndarray\n array containing the (N+1) bin edges\n Notes\n -----\n This is an incomplete implementation: it may fail for some\n datasets. Alternate fitness functions and prior forms can\n be found in the paper listed above.\n \n # copy and sort the array\n t = np.sort(t)\n N = t.size\n # create length-(N + 1) array of cell edges\n edges = np.concatenate([t[:1],\n 0.5 * (t[1:] + t[:-1]),\n t[-1:]])\n block_length = t[-1] - edges\n # arrays needed for the iteration\n nn_vec = np.ones(N)\n best = np.zeros(N, dtype=float)\n last = np.zeros(N, dtype=int)\n #-----------------------------------------------------------------\n # Start with first data cell; add one cell at each iteration\n #-----------------------------------------------------------------\n for K in range(N):\n # Compute the width and count of the final bin for all possible\n # locations of the K^th changepoint\n width = block_length[:K + 1] - block_length[K + 1]\n count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]\n # evaluate fitness function for these possibilities\n fit_vec = count_vec * (np.log(count_vec) - np.log(width))\n fit_vec -= 4 # 4 comes from the prior on the number of changepoints\n fit_vec[1:] += best[:K]\n # find the max of the fitness: this is the K^th changepoint\n i_max = np.argmax(fit_vec)\n last[K] = i_max\n best[K] = fit_vec[i_max]\n \n #-----------------------------------------------------------------\n # Recover changepoints by iteratively peeling off the last block\n #-----------------------------------------------------------------\n change_points = np.zeros(N, dtype=int)\n i_cp = N\n ind = N\n while True:\n i_cp -= 1\n change_points[i_cp] = ind\n if ind == 0:\n break\n ind = last[ind - 1]\n change_points = change_points[i_cp:]\n return edges[change_points]\nExplanation: Problem 1d\nWhich parameters most affected the shape of the final distribution?\nWhat are some possible issues with using a KDE representation of the data?\nDiscuss with your partner\nSolution 1d\nWrite your response here\nProblem 2) Histograms Episode IV: A New Hope\nHow can we create representations of our data that are robust against the known issues with histograms and KDEs?\nIntroducing: Bayesian Blocks\nWe want to represent our data in the most general possible way, a method that\n* avoids assumptions about smoothness or shape of the signal (which might place limitations on scales and resolution)\n* is nonparametric (doesn't fit some model)\n* finds and characterizes local structure in our time series (in contrast to periodicities)\n(continued)\nhandles arbitrary sampling (i.e. doesn't require evenly spaced samples, doesn't care about sparse samples)\nis as hands-off as possible-- user interventions should be minimal or non-existent \nis applicable to multivariate data\ncan both analyze data after they are collected, and in real time \nBayesian Blocks works by creating a super-simple representation of the data, essentially a piecewise fit that segments our time series. \nIn the implementations we will use today, the model is a piecewise linear fit in time across each individual bin, or \"block\". \none modeling the signal as linear in time across the block:\n$$x(t) = λ(1 + a(t − t_{fid}))$$ \nwhere $\\lambda$ is the signal strength at the fiducial time $t_{fid}$, and the coefficient $a$ determines the rate of change over the block. \nScargle et al. (2012) point out that using a linear fit is good because it makes calculating the fit really easy, but you could potentially use something more complicated (they provide some details for using an exponential model, $x(t) = λe^{a(t−t_{fid}})$, in their Appendix C.\nThe Fitness Function\nThe insight in Bayesian Blocks is that you can use a Bayesian likelihood framework to compute a \"fitness function\" that depends only on the number and size of the blocks. \nIn every block, you are trying to maximize some goodness-of-fit measure for data in that individual block. This fit depends only on the data contained in its block, and is independent of all other data.\nThe optimal segmentation of the time series, then, is the segmentation that maximizes fitness-- the total goodness-of-fit over all the blocks (so for example, you could use the sum over all blocks of whatever your quantitative expression is for the goodness of fit in individual blocks). \nThe entire time series is then represented by a series of segments (blocks) characterized by very few parameters:\n$N_{cp}$: the number of change-points\n$t_{k}^{cp}$: the change-point starting block k\n$X_k$: the signal amplitude in block k\nfor k = 1, 2, ... $N_{cp}$\nWhen using Bayesian Blocks (particularly on time series data) we often speak of \"change points\" rather than segmentation, as the block edges essentially tell us the discrete times at which a signal’s statistical properties change discontinuously, though the segments themselves are constant between these points. \nYou looking at KDEs right now:\nIn some cases (such as some of the examples below), the Bayesian Block representation may look kind of clunky. HOWEVER: remember that histograms and KDEs may sometimes look nicer, but can be really misleading! If you want to derive physical insight from these representations of your data, Bayesian Blocks can provide a means of deriving physically interesting quantities (for example, better estimates of event locations, lags, amplitudes, widths, rise and decay times, etc). \nOn top of that, you can do all of the above without losing or hiding information via smoothing or other model assumptions. \nHOW MANY BLOCKS, THO?!\nWe began this lesson by bemoaning that histograms force us to choose a number of bins, and that KDEs require us to choose a bandwidth. Furthermore, one of the requirements we had for a better way forward was that the user interaction be minimal or non-existent. What to do?\nBayesian Blocks works by defining a prior distribution for the number of blocks, such that a single parameter controls the steepness of this prior (in other words, the relative probability for smaller or larger numbers of blocks. \nOnce this prior is defined, the size, number, and locations of the blocks are determined solely and uniquely by the data. \nSo, what does the prior look like?\nIn most cases, $N_{blocks}$ << N (you are, after all, still binning your data-- if $N_{blocks}$ was close to N, you wouldn't really be doing much). Scargle et al. (2012) adopts a geometric prior (Coram 2002), which assigns smaller probability to a large number of blocks:\n$$P(N_{blocks}) = P_{0}\\gamma N_{blocks}$$\nfor $0 ≤ N_{blocks} ≤ N$, and zero otherwise since $N_{blocks}$ cannot be negative or larger than the number of data cells. \nSubstituting in the normalization constant $P_{0}$ gives\n$$P(N_{blocks}) = \\frac{1−\\gamma}{1-\\gamma^{N+1}}\\gamma^{N_{blocks}}$$\nEssentially, this prior says that finding k + 1 blocks is less likely than finding k blocks by the constant factor $\\gamma$. Scargle (2012) also provides a nice intuitive way of thinking about $\\gamma$: $\\gamma$ is adjusting the amount of structure in the resulting representation.\nThe Magic Part\nAt this point, you may be wondering about how the algorithm is capable of finding an optimal number of blocks. As Scargle et al (2012) admits\nthe number of possible partitions (i.e. the number of ways N cells can be arranged in blocks) is $2^N$. This number is exponentially large, rendering an explicit exhaustive search of partition space utterly impossible for all but very small N.\nIn his blog post on Bayesian Blocks, Jake VdP compares the algorithm's use of dynamic programming to mathematical induction. For example: how could you prove that \n$$1 + 2 + \\cdots + n = \\frac{n(n+1)}{2}$$\nis true for all positive integers $n$? An inductive proof of this formula proceeds in the following fashion:\nBase Case: We can easily show that the formula holds for $n = 1$.\nInductive Step: For some value $k$, assume that $1 + 2 + \\cdots + k = \\frac{k(k+1)}{2}$ holds. \nAdding $(k + 1)$ to each side and rearranging the result yields \n$$1 + 2 + \\cdots + k + (k + 1) = \\frac{(k + 1)(k + 2)}{2}$$\nLooking closely at this, we see that we have shown the following: if our formula is true for $k$, then it must be true for $k + 1$.\nBy 1 and 2, we can show that the formula is true for any positive integer $n$, simply by starting at $n=1$ and repeating the inductive step $n - 1$ times.\nIn the Bayesian Blocks algorithm, one can find the optimal binning for a single data point; so by analogy with our example above (full details are given in the Appendix of Scargle et al. 2012), if you can find the optimal binning for $k$ points, it's a short step to the optimal binning for $k + 1$ points. \nSo, rather than performing an exhaustive search of all possible bins, which would scale as $2^N$, the time to find the optimal binning instead scales as $N^2$.\nPlaying with Blocks\nWe will begin playing with (Bayesian) blocks with a simple implementation, outlined by Jake VanderPlas in this blog: https://jakevdp.github.io/blog/2012/09/12/dynamic-programming-in-python/\nEnd of explanation\n# complete \nplt.hist(\nExplanation: Problem 2a \nLet's visualize our data again, but this time we will use Bayesian Blocks.\nPlot a standard histogram (as above), but now plot the Bayesian Blocks representation of the distribution over it.\nEnd of explanation\n# complete\nExplanation: Problem 2b\nHow is the Bayesian Blocks representation different or similar? \nHow might your choice of representation affect your scientific conclusions about your data? \nTake a few min to discuss this with your partner\nIf you are using histograms for analysis, you might infer physical meaning from the presence or absence of features in these distributions. As it happens, histograms of time-tagged event data are often used to characterize physical events in time domain astronomy, for example gamma ray bursts or stellar flares. \nProblem 3) Bayesian Blocks in the wild\nNow we'll apply Bayesian Blocks to some real astronomical data, and explore how our visualization choices may affect our scientific conclusions.\nFirst, let's get some data!\nAll data from NASA missions is hosted on the Mikulski Archive for Space Telescopes (aka MAST). As an aside, the M in MAST used to stand for \"Multimission\", but was changed to honor Sen. Barbara Mikulski (D-MD) for her tireless support of science. \nSome MAST data (mostly the original data products) can be directly accessed using astroquery (there's an extensive guide to interacting with MAST via astroquery here: https://astroquery.readthedocs.io/en/latest/mast/mast.html). \nIn addition, MAST also hosts what are called \"Higher Level Science Products\", or HLSPs, which are data derived by science teams in the course of doing their analyses. You can see a full list of HLSPs here: https://archive.stsci.edu/hlsp/hlsp-table\nThese data tend to be more heterogeneous, and so are not currently accessible through astroquery (for the most part). They will be added in the future. But never fear! You can also submit SQL queries via MAST's CasJobs interface.\nGo to the MAST CasJobs http://mastweb.stsci.edu/mcasjobs/home.aspx\nIf I have properly remembered to tell you to create a MAST CasJobs login, you can login now (and if not, just go ahead and sign up now, it's fast)!\nWe will be working with the table of new planet radii by Berger et al. (2019). \nIf you like, you can check out the paper here! https://arxiv.org/pdf/1805.00231.pdf\nFrom the \"Query\" tab, select \"HLSP_KG_RADII\" from the Context drop-down menu. \nYou can then enter your query. In this example, we are doing a simple query to get all the KG-RADII radii and fluxes from the exoplanets catalog, which you could use to reproduce the first figure, above. For short queries that can execute in less than 60 seconds, you can hit the \"Quick\" button and the results of your query will be displayed below, where you can export them as needed. For longer queries like this one, you can select into an output table (otherwise a default like MyDB.MyTable will be used), hit the \"Submit\" button, and when finished your output table will be available in the MyDB tab.\nProblem 3a\nWrite a SQL query to fetch this table from MAST using CasJobs.\nYour possible variables are KIC_ID, KOI_ID, Planet_Radius, Planet_Radius_err_upper, Planet_Radius_err_lower, Incident_Flux, Incident_Flux_err_upper, Incident_Flux_err_lower, AO_Binary_Flag\nFor very short queries you can use \"Quick\" for your query; this table is large enough that you should use \"Submit\".\nHint: You will want to SELECT some stuff FROM a table called exoplanet_parameters\nSolution 3a\nWrite your SQL query here\nOnce your query has completed, you will go to the MyDB tab to see the tables you have generated (in the menu at left). From here, you can click on a table, and select Download. I would recommend downloading your file as a CSV (comma-separated value) file, as CSV are simple, and can easily read into python via a variety of methods.\nProblem 3b\nTime to read in the data! There are several ways of importing a csv into python... choose your favorite and load in the table you downloaded.\nEnd of explanation\n# complete\nExplanation: Problem 3c\nLet's look at the distribution of the small planet radii in this table, which are given in units of Earth radii. Select the planets whose radii are Neptune-sized or smaller. \nSelect the planet radii for all planets smaller than Neptune in the table, and visualize the distribution of planet radii using a standard histogram.\nEnd of explanation\n# complete\nExplanation: Problem 3d\nWhat features do you see in the histogram of planet radii? Which of these features are important? \nDiscuss with your partner\nSolution 3d\nWrite your answer here\nProblem 3e\nNow let's try visualizing these data using Bayesian Blocks. Please recreate the histogram you plotted above, and then plot the Bayesian Blocks version over it.\nEnd of explanation\nimport astropy.stats.bayesian_blocks as bb\nExplanation: Problem 3f\nWhat features do you see in the histogram of planet radii? Which of these features are important? \nDiscuss with your partner.\nHint: maybe you should look at some of the comments in the implementation of Bayesian Blocks we are using\nSolution 3f\nWrite your answer here\nOK, so in this case, the Bayesian Blocks representation of the data looks fairly different. A couple things might stand out to you:\n* There are large spikes in the Bayesian Blocks representation that are not present in the standard histogram\n* If we're just looking at the Bayesian Block representation, it's not totally clear whether one should believe that there are two peaks in the distribution. \nHMMMMMmmmm....\nWait! Jake VDP told us to watch out for this implementation. Maybe we should use something a little more official instead...\nGOOD NEWS~! There is a Bayesian Blocks implementation included in astropy. Let's try that. \nhttp://docs.astropy.org/en/stable/api/astropy.stats.bayesian_blocks.html\nImportant note\nThere is a known issue in the astropy implementation of Bayesian Blocks; see: https://github.com/astropy/astropy/issues/8317\nIt is possible this issue will be fixed in a future release, but in the event this problem arises for you, you will need to edit bayesian_blocks.py to include the following else statement (see issue link above for exact edit):\nif self.ncp_prior is None:\n ncp_prior = self.compute_ncp_prior(N)\n else:\n ncp_prior = self.ncp_prior\nEnd of explanation\n# complete\nExplanation: Problem 3g\nPlease repeat the previous problem, but this time use the astropy implementation of Bayesian Blocks.\nEnd of explanation\n# complete\nExplanation: Putting these results in context\nBoth standard histograms and KDEs can be useful for quickly visualizing data, and in some cases, getting an intuition for the underlying PDF of your data.\nHowever, keep in mind that they both involve making parameter choices that are largely not motivated in any quantitative way. These choices can create wildly misleading representations of your data. \nIn particular, your choices may lead you to make a physical interpretation that may or may not be correct (in our example, bear in mind that the observed distribution of exoplanetary radii informs models of planet formation).\nBayesian Blocks is more than just a variable-width histogram\nWhile KDEs often do a better job of visualizing data than standard histograms do, they also create a loss of information. Philosophically speaking, what Bayesian Blocks do is posit that the \"change points\", also known as the bin edges, contain information that is interesting. When you apply a KDE, you are smoothing your data by creating an approximation, and that can mean you are losing potential insights by removing information. \nWhile Bayesian Blocks are useful as a replacement for histograms in general, their ability to identify change points makes them especially useful for time series analysis. \nProblem 4) Bayesian Blocks for Time Series Analysis\nWhile Bayesian Blocks can be very useful as a simple replacement for histograms, one of its great strengths is in finding \"change points\" in time series data. Finding these change points can be useful for discovering interesting events in time series data you already have, and it can be used in real-time to detect changes that might trigger another action (for example, follow up observations for LSST). \nLet's take a look at a few examples of using Bayesian Blocks in the time series context. \nFirst and foremost, it's important to understand the different between various kinds of time series data. \nEvent data come from photon counting instruments. In these data, the time series typically consists of photon arrival times, usually in a particular range of energies that the instrument is sensitive to. Event data are univariate, in that the time series is \"how many photons at a given time\", or \"how many photons in a given chunk of time\". \nPoint measurements are measurements of a (typically) continuous source at a given moment in time, often with some uncertainty associated with the measurement. These data are multivariate, as your time series relates time, your measurement (e.g. flux, magnitude, etc) and its associated uncertainty to one another. \nProblem 4a\nLet's look at some event data from BATSE, a high energy astrophysics experiment that flew on NASA's Compton Gamma-Ray Observatory. BATSE primarily studied gamma ray bursts (GRBs), capturing its detections in four energy channels: ~25-55 keV, 55-110 keV, 110-320 keV, and >320 keV.\nYou have been given four text files that record one of the BATSE GRB detections. Please read these data in.\nEnd of explanation\n# complete\nExplanation: Problem 4b\nWhen you reach this point, you and your partner should pick a number between 1 and 4; your number is the channel whose data you will work with. \nUsing the data for your channel, please visualize the photon events in both a standard histogram and using Bayesian Blocks.\nEnd of explanation\n# execute this cell\nkplr_hdul = fits.open('./data/kplr009726699-2011271113734_llc.fits')\nExplanation: Problem 4c\nLet's take a moment to reflect on the differences between these two representations of our data. \nPlease discuss with your partner:\n* How many bursts are present in these two representations? \n* How accurately would you be able to identify the time of the burst(s) from these representations? What about other quantities?\nFor the groups who worked with Channel 3:\nYou may have noticed very sharp features in your blocks representation of the data. Are they real?\nTo quote Jake VdP:\nSimply put, there are spikes because the piecewise constant likelihood model says that spikes are favored. By saying that the spikes seem unphysical, you are effectively adding a prior on the model based on your intuition of what it should look like.\nTo quote Jeff Scargle:\nTrust the algorithm!\nProblem 5: Finding flares\nAs we have just seen, Bayesian Blocks can be very useful for finding transient events-- it worked great on our photon counts from BATSE! Let's try it on some slightly more complicated data: lightcurves from NASA's Kepler mission. Kepler's data consists primarily of point measures (rather than events)-- a Kepler lightcurve is just the change in the brightness of the star over time (with associated uncertainties).\nProblem 5a\nPeople often speak of transients (like the GRB we worked with above) and variables (like RR Lyrae or Cepheid stars) as being two completely different categories of changeable astronomical objects. However, some objects exhibit both variability and transient events. Magnetically active stars are one example of these objects: many of them have starspots that rotate into and out of view, creating periodic (or semi-periodic) variability, but they also have flares, magnetic reconnection events that create sudden, rapid changes in the stellar brightness. \nA challenge in identifying flares is that they often appear against a background that is itself variable. While their are many approaches to fitting both quiescent and flare variability (Gaussian processes, which you saw earlier this week, are often used for exactly this purpose!), they can be very time consuming. \nLet's read in some data, and see whether Bayesian Blocks can help us here.\nEnd of explanation\n# execute this cell\nkplr_hdul.info()\nExplanation: Cool, we have loaded in the FITS file. Let's look at what's in it:\nEnd of explanation\n# execute this cell\nlcdata = kplr_hdul[1].data\nlcdata.columns\n# execute this cell \nt = lcdata['TIME']\nf = lcdata['PDCSAP_FLUX']\ne = lcdata['PDCSAP_FLUX_ERR']\nt = t[~np.isnan(f)]\ne = e[~np.isnan(f)]\nf = f[~np.isnan(f)]\nnf = f / np.median(f)\nne = e / np.median(f)\nExplanation: We want the light curve, so let's check out what's in that part of the file!\nEnd of explanation\n# complete \nExplanation: Problem 5b\nUse a scatter plot to visualize the Kepler lightcurve. I strongly suggest you try displaying it at different scales by zooming in (or playing with the axis limits), so that you can get a better sense of the shape of the lightcurve.\nEnd of explanation\n# complete\nedges = \n#\n#\n#\nplt.step(\nExplanation: Problem 5c\nThese data consist of a variable background, with occasional bright points caused by stellar flares.\nBrainstorm possible approaches to find the flare events in this data. Write down your ideas, and discuss their potential advantages, disadvantages, and any pitfalls that you think might arise. \nDiscuss with your partner\nSolution 5c\nWrite your notes here\nThere are lots of possible ways to approach this problem. In the literature, a very common traditional approach has been to fit the background variability while ignoring the outliers, then to subtract the background fit, and flag any point beyond some threshold value as belonging to a flare. More sophisticated approaches also exist, but they are often quite time consuming (and in many cases, detailed fits require a good starting estimate for the locations of flare events). \nRecall, however, that Bayesian Blocks is particularly effective at identifying change points in our data. Let's see if it can help us in this case!\nProblem 5d\nUse Bayesian Blocks to visualize the Kepler lightcurve. Note that you are now using data that consists of point measurements, rather than event data (as for the BATSE example).\nEnd of explanation\n# complete\nyour_data = \n# complete\ndef stratified_bayesian_blocks(\nExplanation: Concluding Remarks\nAs you can see, using Bayesian Blocks allowed us to represent the data, including both quiescent variability and flares, without having to smooth, clip, or otherwise alter the data. \nOne potential drawback here is that the change points don't identify the flares themselves, or at least don't identify them as being different from the background variability-- the algorithm identifies change points, but does not know anything about what change points might be interesting to you in particular. \nAnother potential drawback is that it is possible that the Bayesian Block representation may not catch all events, or at least may not, on its own, provide an unambiguous sign that a subtle signal of interest is in the data.\nIn these cases, it is sometimes instructive to use a hybrid approach, where one combines the bins determined from a traditional histogram with the Bayesian Blocks change points. Alternatively, if one has a good model for the background, one can compared the blocks representation of a background-only simulated data set with one containing both background and model signal. \nFurther interesting examples (in the area of high energy physics and astrophysics) are provided by this paper:\nhttps://arxiv.org/abs/1708.00810\nChallenge Problem\nBayesian Blocks is so great! However, there are occasions in which it doesn't perform all that well-- particularly when there are large numbers of repeated values in the data. Jan Florjanczyk, a senior data scientist at Netflix, has written up a description of the problem, and implemented a version of Bayesian Blocks that does a better job on data with repeating values. Read his blog post on \"Stratfied Bayesian Blocks\" and try implementing it! \nhttps://medium.com/@janplus/stratified-bayesian-blocks-2bd77c1e6cc7\nYou can either apply this to your favorite data set, or you can use it on the stellar parameters table associated with the data set we pulled from MAST earlier (recall that you previously worked with the exoplanet parameters table). You can check out the fields you can search on using MAST CasJobs, or just download a FITS file of the full table, here: https://archive.stsci.edu/prepds/kg-radii/\nThis GIF is of Dan Shiffman, who has a Youtube channel called Coding Train\nEnd of explanation"}}},{"rowIdx":2119,"cells":{"Unnamed: 0":{"kind":"number","value":2119,"string":"2,119"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n States\nA Riemann Problem is specified by the state of the material to the left and right of the interface. In this hydrodynamic problem, the state is fully determined by an equation of state and the variables\n$$\n {\\bf U} = \\begin{pmatrix} \\rho_0 \\ v_x \\ v_t \\ \\epsilon \\end{pmatrix},\n$$\nwhere $\\rho_0$ is the rest-mass density, $v_x$ the velocity normal to the interface, $v_t$ the velocity tangential to the interface, and $\\epsilon$ the specific internal energy.\nDefining a state\nIn r3d2 we define a state from an equation of state and the values of the key variables\nStep1: Inside the notebook, the state will automatically display the values of the key variables\nStep2: Adding a label to the state for output purposes requires an extra keyword\nStep3: Reactive states\nIf the state has energy available for reactions, that information is built into the equation of state. The definition of the equation of state changes\nStep4: Additional functions\nA state knows its own wavespeeds. Given a wavenumber (the left acoustic wave is 0, the middle contact or advective wave is 1, and the right acoustic wave is 2), we have\nStep5: A state will return the key primitive variables ($\\rho, v_x, v_t, \\epsilon$)\nStep6: A state will return all the variables it computes, which is $\\rho, v_x, v_t, \\epsilon, p, W, h, c_s$"},"code_prompt":{"kind":"string","value":"Python Code:\nfrom r3d2 import eos_defns, State\neos = eos_defns.eos_gamma_law(5.0/3.0)\nU = State(1.0, 0.1, 0.0, 2.0, eos)\nExplanation: States\nA Riemann Problem is specified by the state of the material to the left and right of the interface. In this hydrodynamic problem, the state is fully determined by an equation of state and the variables\n$$\n {\\bf U} = \\begin{pmatrix} \\rho_0 \\ v_x \\ v_t \\ \\epsilon \\end{pmatrix},\n$$\nwhere $\\rho_0$ is the rest-mass density, $v_x$ the velocity normal to the interface, $v_t$ the velocity tangential to the interface, and $\\epsilon$ the specific internal energy.\nDefining a state\nIn r3d2 we define a state from an equation of state and the values of the key variables:\nEnd of explanation\nU\nExplanation: Inside the notebook, the state will automatically display the values of the key variables:\nEnd of explanation\nU2 = State(10.0, -0.3, 0.1, 5.0, eos, label=\"L\")\nU2\nExplanation: Adding a label to the state for output purposes requires an extra keyword:\nEnd of explanation\nq_available = 0.1\nt_ignition = 10.0\nCv = 1.0\neos_reactive = eos_defns.eos_gamma_law_react(5.0/3.0, q_available, Cv, t_ignition, eos)\nU_reactive = State(5.0, 0.1, 0.1, 2.0, eos_reactive, label=\"Reactive\")\nU_reactive\nExplanation: Reactive states\nIf the state has energy available for reactions, that information is built into the equation of state. The definition of the equation of state changes: the definition of the state itself does not:\nEnd of explanation\nprint(\"Left wavespeed of first state is {}\".format(U.wavespeed(0)))\nprint(\"Middle wavespeed of second state is {}\".format(U2.wavespeed(1)))\nprint(\"Right wavespeed of reactive state is {}\".format(U.wavespeed(2)))\nExplanation: Additional functions\nA state knows its own wavespeeds. Given a wavenumber (the left acoustic wave is 0, the middle contact or advective wave is 1, and the right acoustic wave is 2), we have:\nEnd of explanation\nprint(\"Primitive variables of first state are {}\".format(U.prim()))\nExplanation: A state will return the key primitive variables ($\\rho, v_x, v_t, \\epsilon$):\nEnd of explanation\nprint(\"All variables of second state are {}\".format(U.state()))\nExplanation: A state will return all the variables it computes, which is $\\rho, v_x, v_t, \\epsilon, p, W, h, c_s$: the primitive variables as above, the pressure $p$, Lorentz factor $W$, specific enthalpy $h$, and speed of sound $c_s$:\nEnd of explanation"}}},{"rowIdx":2120,"cells":{"Unnamed: 0":{"kind":"number","value":2120,"string":"2,120"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Outline\nGlossary\n1. Radio Science using Interferometric Arrays\nPrevious\nStep1: Import section specific modules\nStep2: 1.6.1 Synchrotron Emission\nStep3: Figure 1.6.1 Example path of a charged particle accelerated in a magnetic field\nThe frequency of gyration in the non-relativistic case is simply\n$$\\omega = \\frac{qB}{mc} $$\nFor synchrotron radiation, this gets modified to\n$$\\omega_{G}= \\frac{qB}{\\gamma mc} $$\nsince, in the relativistic case, the mass is modified to $m \\rightarrow \\gamma m$. \nIn the non-relativistic case (i.e. cyclotron radiation) the frequency of gyration corresponds to the frequency of the emitted radiation. If this was also the case for the synchrotron radiation then, for magnetic fields typically found in galaxies (a few micro-Gauss or so), the resultant frequency would be less than one Hertz! Fortunately the relativistic beaming and Doppler effects come into play increasing the frequency of the observed radiation by a factor of about $\\gamma^{3}$. This brings the radiation into the radio regime. This frequency, known also as the 'critical frequency' is at most of the emission takes place. It is given by\n$$\\nu_{c} \\propto \\gamma^{3}\\nu_{G} \\propto E^{2}$$\n&nbsp;&nbsp;&nbsp;&nbsp;\nImport standard modules:\nEnd of explanation\nfrom IPython.display import Image\nHTML('../style/code_toggle.html')\nExplanation: Import section specific modules:\nEnd of explanation\nImage(filename='figures/drawing.png', width=300)\nExplanation: 1.6.1 Synchrotron Emission:\nSychrotron emission is one of the most commonly encountered forms of radiation found from astronomical radio sources. This type of radiation originates from relativistic particles get accelerated in a magnetic field. \nThe mechanism by which synchrotron emission occurs depends fundamentally on special relativistic effects. We won't delve into the details here. Instead we will try to explain (in a rather hand wavy way) some of the underlying physics. As we have seen in $\\S$ 1.2.1 &#10142;,\n LB:RF:this is the original link but I don't think it points to the right place. Add a reference to where this is discussed and link to that. See also comment in previous section about where the Larmor formula is first introduced\nan accelerating charge emitts radiation. The acceleration is a result of the charge moving through an ambient magnetic field. The non-relativistic Larmor formula for the radiated power is: \n$$P= \\frac{2}{3}\\frac{q^{2}a^{2}}{c^{3}}$$\nIf the acceleration is a result of a magnetic field $B$, we get:\n$$P=\\frac{2}{3}\\frac{q^{2}}{c^{3}}\\frac{v_{\\perp}^{2}B^{2}q^{2}}{m^{2}} $$\nwhere $v_{\\perp}$ is the component of velocity of the particle perpendicular to the magnetic field, $m$ is the mass of the charged particle, $q$ is it's charge and $a$ is its acceleration. This is essentially the cyclotron radiation. Relativistic effects (i.e. as $v_\\perp \\rightarrow c$) modifies this to:\n$$P = \\gamma^{2} \\frac{2}{3}\\frac{q^{2}}{c^{3}}\\frac{v_{\\perp}^{2}B^{2}q^{2}}{m^{2}c^{2}} = \\gamma^{2} \\frac{2}{3}\\frac{q^{4}}{c^{3}}\\frac{v_{\\perp}^{2}B^{2}}{m^{2}c^{2}} $$ \nwhere $$\\gamma = \\frac{1}{\\sqrt{1+v^{2}/c^{2}}} = \\frac{E}{mc^{2}} $$ \n LB:IC: This is a very unusual form for the relativistic version of Larmor's formula. I suggest clarifying the derivation. \nis a measure of the energy of the particle. Non-relativistic particles have $\\gamma \\sim 1$ whereas relativistic and ultra-relativistic particles typically have $\\gamma \\sim 100$ and $\\gamma \\geq 1000$ respectively. Since $v_{\\perp}= v \\sin\\alpha$, with $\\alpha$ being the angle between the magnetic field and the velocity of the particle, the radiated power can be written as:\n$$P=\\gamma^{2} \\frac{2}{3}\\frac{q^{4}}{c^{3}}\\frac{v^{2}B^{2}\\sin\\alpha^{2}}{m^{2}c^{2}} $$\nFrom this equation it can be seen that the total power radiated by the particle depends on the strength of the magnetic field and that the higher the energy of the particle, the more power it radiates. \nIn analogy with the non-relativistic case, there is a frequency of gyration. This refers to the path the charged particle follows while being accelerated in a magnetic field. The figure below illustrates the idea.\nEnd of explanation\nImage(filename='figures/cygnusA.png')\nExplanation: Figure 1.6.1 Example path of a charged particle accelerated in a magnetic field\nThe frequency of gyration in the non-relativistic case is simply\n$$\\omega = \\frac{qB}{mc} $$\nFor synchrotron radiation, this gets modified to\n$$\\omega_{G}= \\frac{qB}{\\gamma mc} $$\nsince, in the relativistic case, the mass is modified to $m \\rightarrow \\gamma m$. \nIn the non-relativistic case (i.e. cyclotron radiation) the frequency of gyration corresponds to the frequency of the emitted radiation. If this was also the case for the synchrotron radiation then, for magnetic fields typically found in galaxies (a few micro-Gauss or so), the resultant frequency would be less than one Hertz! Fortunately the relativistic beaming and Doppler effects come into play increasing the frequency of the observed radiation by a factor of about $\\gamma^{3}$. This brings the radiation into the radio regime. This frequency, known also as the 'critical frequency' is at most of the emission takes place. It is given by\n$$\\nu_{c} \\propto \\gamma^{3}\\nu_{G} \\propto E^{2}$$\n LB:IC: The last sentence is not clear. Why is it called the critical frequency? How does it come about? \nSo far we have discussed a single particle emitting synchrotron radiation. However, what we really want to know is what happens in the case of an ensemble of radiating particles. Since, in an (approximately) uniform magnetic field, the synchrotron emission depends only on the magnetic field and the energy of the particle, all we need is the distribution function of the particles. Denoting the distribution function of the particles as $N(E)$ (i.e. the number of particles at energy $E$ per unit volume per solid angle), the spectrum resulting from an ensemble of particles is: \n$$ \\epsilon(E) dE = N(E) P(E) dE $$\n LB:IC: Clarify what is $P(E)$. How does the spectrum come about? \nThe usual assumption made about the distribution $N(E)$ (based also on the observed cosmic ray distribution) is that of a power law, i.e.\n$$N(E)dE=E^{-\\alpha}dE $$\nPlugging in this and remembering that $P(E) \\propto \\gamma^{2} \\propto E^{2}$, we get\n$$ \\epsilon(E) dE \\propto E^{2-\\alpha} dE $$ \nShifting to the frequency domain \n$$\\epsilon(\\nu) \\propto \\nu^{(1-\\alpha)/2} $$ \nThe usual value for $\\alpha$ is 5/2 and since flux $S_{\\nu} \\propto \\epsilon_{\\nu}$ \n$$S_{\\nu} \\propto \\nu^{-0.75} $$\nThis shows that the synchrotron flux is also a power law, if the underlying distribution of particles is a power law.\n LB:IC: The term spectral index is used below without being properly introduced. Introduce the notion of a spectral index here. \nThis is approximately valid for 'fresh' collection of radiating particles. However, as mentioned above, the higher energy particles lose energy through radiation much faster than the lower energy particles. This means that the distribution of particles over time gets steeper at higher frequencies (which is where the contribution from the high energy particles comes in). As we will see below, this steepening of the spectral index is a typical feature of older plasma in astrophysical scenarios.\n1.6.2 Sources of Synchrotron Emission:\nSo where do we actually see synchrotron emission? As mentioned above, the prerequisites are magnetic fields and relativistic particles. These conditions are satisfied in a variety of situations. Prime examples are the lobes of radio galaxies. The lobes contain relativistic plasma in magnetic fields of strength ~ $\\mu$G. It is believed that these plasmas and magnetic fields ultimately originate from the activity in the center of radio galaxies where a supermassive black hole resides. The figure below shows a radio image of the radio galaxy nearest to us, Cygnus A.\nEnd of explanation\n# Data taken from Steenbrugge et al.,2010, MNRAS\nfreq=(151.0,327.5,1345.0,4525.0,8514.9,14650.0)\nflux_L=(4746,2752.7,749.8,189.4,83.4,40.5)\nflux_H=(115.7,176.4,69.3,45.2,20.8,13.4)\nfig,ax = plt.subplots()\nax.loglog(freq,flux_L,'bo--',label='Lobe Flux')\nax.loglog(freq,flux_H,'g*-',label='Hotspot Flux')\nax.legend()\nax.set_xlabel(\"Frequency (MHz)\")\nax.set_ylabel(\"Flux (Jy)\")\nExplanation: Figure 1.6.2 Cygnus A: Example of Synchroton Emission\nThe jets, which carry relativistic charged particles or plasma originating from the centre of the host galaxy (marked as 'core' in the figure), collide with the surrounding medium at the places labelled as \"hotspots\" in the figure. The plasma responsible for the radio emission (the lobes) tends to stream backward from the hotspots. As a result we can expect the youngest plasma to reside in and around the hotspots. On the other hand, we can expect the plasma closest to the core to be the oldest. But is there a way to verify this?\nWell, the non-thermal nature of the emission can be verified by measuring the spectrum of the radio emission. A value close to -0.7 suggests, by the reasoning given above, that the radiation results from a synchroton emission mechanism. The plots below show the spectrum of the lobes of Cygnus A within a frequency range of 150 MHz to 14.65 GHz. \n LB:RF: Add proper citation. \nEnd of explanation"}}},{"rowIdx":2121,"cells":{"Unnamed: 0":{"kind":"number","value":2121,"string":"2,121"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Executed\nStep1: Load software and filenames definitions\nStep2: Data folder\nStep3: List of data files\nStep4: Data load\nInitial loading of the data\nStep5: Laser alternation selection\nAt this point we have only the timestamps and the detector numbers\nStep6: We need to define some parameters\nStep7: We should check if everithing is OK with an alternation histogram\nStep8: If the plot looks good we can apply the parameters with\nStep9: Measurements infos\nAll the measurement data is in the d variable. We can print it\nStep10: Or check the measurements duration\nStep11: Compute background\nCompute the background using automatic threshold\nStep12: Burst search and selection\nStep14: Donor Leakage fit\nHalf-Sample Mode\nFit peak usng the mode computed with the half-sample algorithm (Bickel 2005).\nStep15: Gaussian Fit\nFit the histogram with a gaussian\nStep16: KDE maximum\nStep17: Leakage summary\nStep18: Burst size distribution\nStep19: Fret fit\nMax position of the Kernel Density Estimation (KDE)\nStep20: Weighted mean of $E$ of each burst\nStep21: Gaussian fit (no weights)\nStep22: Gaussian fit (using burst size as weights)\nStep23: Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE)\nStep24: The Maximum likelihood fit for a Gaussian population is the mean\nStep25: Computing the weighted mean and weighted standard deviation we get\nStep26: Save data to file\nStep27: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.\nStep28: This is just a trick to format the different variables"},"code_prompt":{"kind":"string","value":"Python Code:\nph_sel_name = \"Dex\"\ndata_id = \"22d\"\n# ph_sel_name = \"all-ph\"\n# data_id = \"7d\"\nExplanation: Executed: Mon Mar 27 11:36:04 2017\nDuration: 8 seconds.\nusALEX-5samples - Template\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.\nEnd of explanation\nfrom fretbursts import *\ninit_notebook()\nfrom IPython.display import display\nExplanation: Load software and filenames definitions\nEnd of explanation\ndata_dir = './data/singlespot/'\nimport os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir\nExplanation: Data folder:\nEnd of explanation\nfrom glob import glob\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\nph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'), \n 'DexDem': Ph_sel(Dex='Dem')}\nph_sel = ph_sel_map[ph_sel_name]\ndata_id, ph_sel_name\nExplanation: List of data files:\nEnd of explanation\nd = loader.photon_hdf5(filename=files_dict[data_id])\nExplanation: Data load\nInitial loading of the data:\nEnd of explanation\nd.ph_times_t, d.det_t\nExplanation: Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:\nEnd of explanation\nd.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)\nExplanation: We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:\nEnd of explanation\nplot_alternation_hist(d)\nExplanation: We should check if everithing is OK with an alternation histogram:\nEnd of explanation\nloader.alex_apply_period(d)\nExplanation: If the plot looks good we can apply the parameters with:\nEnd of explanation\nd\nExplanation: Measurements infos\nAll the measurement data is in the d variable. We can print it:\nEnd of explanation\nd.time_max\nExplanation: Or check the measurements duration:\nEnd of explanation\nd.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\ndplot(d, timetrace_bg)\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa\nExplanation: Compute background\nCompute the background using automatic threshold:\nEnd of explanation\nbs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel)\nd.burst_search(**bs_kws)\nth1 = 30\nds = d.select_bursts(select_bursts.size, th1=30)\nbursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True)\n .round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4}))\nbursts.head()\nburst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'\n .format(sample=data_id, th=th1, **bs_kws))\nburst_fname\nbursts.to_csv(burst_fname)\nassert d.dir_ex == 0\nassert d.leakage == 0\nprint(d.ph_sel)\ndplot(d, hist_fret);\n# if data_id in ['7d', '27d']:\n# ds = d.select_bursts(select_bursts.size, th1=20)\n# else:\n# ds = d.select_bursts(select_bursts.size, th1=30)\nds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)\nn_bursts_all = ds.num_bursts[0]\ndef select_and_plot_ES(fret_sel, do_sel):\n ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)\n ds_do = ds.select_bursts(select_bursts.ES, **do_sel)\n bpl.plot_ES_selection(ax, **fret_sel)\n bpl.plot_ES_selection(ax, **do_sel) \n return ds_fret, ds_do\nax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)\nif data_id == '7d':\n fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)\n do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) \n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n \nelif data_id == '12d':\n fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\nelif data_id == '17d':\n fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\nelif data_id == '22d':\n fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \nelif data_id == '27d':\n fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \nn_bursts_do = ds_do.num_bursts[0]\nn_bursts_fret = ds_fret.num_bursts[0]\nn_bursts_do, n_bursts_fret\nd_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)\nprint ('D-only fraction:', d_only_frac)\ndplot(ds_fret, hist2d_alex, scatter_alpha=0.1);\ndplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);\nExplanation: Burst search and selection\nEnd of explanation\ndef hsm_mode(s):\n \n Half-sample mode (HSM) estimator of `s`.\n `s` is a sample from a continuous distribution with a single peak.\n \n Reference:\n Bickel, Fruehwirth (2005). arXiv:math/0505419\n \n s = memoryview(np.sort(s))\n i1 = 0\n i2 = len(s)\n while i2 - i1 > 3:\n n = (i2 - i1) // 2\n w = [s[n-1+i+i1] - s[i+i1] for i in range(n)]\n i1 = w.index(min(w)) + i1\n i2 = i1 + n\n if i2 - i1 == 3:\n if s[i1+1] - s[i1] < s[i2] - s[i1 + 1]:\n i2 -= 1\n elif s[i1+1] - s[i1] > s[i2] - s[i1 + 1]:\n i1 += 1\n else:\n i1 = i2 = i1 + 1\n return 0.5*(s[i1] + s[i2])\nE_pr_do_hsm = hsm_mode(ds_do.E[0])\nprint (\"%s: E_peak(HSM) = %.2f%%\" % (ds.ph_sel, E_pr_do_hsm*100))\nExplanation: Donor Leakage fit\nHalf-Sample Mode\nFit peak usng the mode computed with the half-sample algorithm (Bickel 2005).\nEnd of explanation\nE_fitter = bext.bursts_fitter(ds_do, weights=None)\nE_fitter.histogram(bins=np.arange(-0.2, 1, 0.03))\nE_fitter.fit_histogram(model=mfit.factory_gaussian())\nE_fitter.params\nres = E_fitter.fit_res[0]\nres.params.pretty_print()\nE_pr_do_gauss = res.best_values['center']\nE_pr_do_gauss\nExplanation: Gaussian Fit\nFit the histogram with a gaussian:\nEnd of explanation\nbandwidth = 0.03\nE_range_do = (-0.1, 0.15)\nE_ax = np.r_[-0.2:0.401:0.0002]\nE_fitter.calc_kde(bandwidth=bandwidth)\nE_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1])\nE_pr_do_kde = E_fitter.kde_max_pos[0]\nE_pr_do_kde\nExplanation: KDE maximum\nEnd of explanation\nmfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False)\nplt.axvline(E_pr_do_hsm, color='m', label='HSM')\nplt.axvline(E_pr_do_gauss, color='k', label='Gauss')\nplt.axvline(E_pr_do_kde, color='r', label='KDE')\nplt.xlim(0, 0.3)\nplt.legend()\nprint('Gauss: %.2f%%\\n KDE: %.2f%%\\n HSM: %.2f%%' % \n (E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100))\nExplanation: Leakage summary\nEnd of explanation\nnt_th1 = 50\ndplot(ds_fret, hist_size, which='all', add_naa=False)\nxlim(-0, 250)\nplt.axvline(nt_th1)\nTh_nt = np.arange(35, 120)\nnt_th = np.zeros(Th_nt.size)\nfor i, th in enumerate(Th_nt):\n ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)\n nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th\nplt.figure()\nplot(Th_nt, nt_th)\nplt.axvline(nt_th1)\nnt_mean = nt_th[np.where(Th_nt == nt_th1)][0]\nnt_mean\nExplanation: Burst size distribution\nEnd of explanation\nE_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')\nE_fitter = ds_fret.E_fitter\nE_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nE_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))\nE_fitter.fit_res[0].params.pretty_print()\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(E_fitter, ax=ax[0])\nmfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))\ndisplay(E_fitter.params*100)\nExplanation: Fret fit\nMax position of the Kernel Density Estimation (KDE):\nEnd of explanation\nds_fret.fit_E_m(weights='size')\nExplanation: Weighted mean of $E$ of each burst:\nEnd of explanation\nds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)\nExplanation: Gaussian fit (no weights):\nEnd of explanation\nds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')\nE_kde_w = E_fitter.kde_max_pos[0]\nE_gauss_w = E_fitter.params.loc[0, 'center']\nE_gauss_w_sig = E_fitter.params.loc[0, 'sigma']\nE_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))\nE_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr\nE_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr\nExplanation: Gaussian fit (using burst size as weights):\nEnd of explanation\nS_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)\nS_fitter = ds_fret.S_fitter\nS_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nS_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(S_fitter, ax=ax[0])\nmfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))\ndisplay(S_fitter.params*100)\nS_kde = S_fitter.kde_max_pos[0]\nS_gauss = S_fitter.params.loc[0, 'center']\nS_gauss_sig = S_fitter.params.loc[0, 'sigma']\nS_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))\nS_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr\nS_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr\nExplanation: Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE):\nEnd of explanation\nS = ds_fret.S[0]\nS_ml_fit = (S.mean(), S.std())\nS_ml_fit\nExplanation: The Maximum likelihood fit for a Gaussian population is the mean:\nEnd of explanation\nweights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)\nS_mean = np.dot(weights, S)/weights.sum()\nS_std_dev = np.sqrt(\n np.dot(weights, (S - S_mean)**2)/weights.sum())\nS_wmean_fit = [S_mean, S_std_dev]\nS_wmean_fit\nExplanation: Computing the weighted mean and weighted standard deviation we get:\nEnd of explanation\nsample = data_id\nExplanation: Save data to file\nEnd of explanation\nvariables = ('sample n_bursts_all n_bursts_do n_bursts_fret '\n 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '\n 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '\n 'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\\n')\nExplanation: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.\nEnd of explanation\nvariables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\nprint(variables_csv)\nprint(data_str)\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)\nExplanation: This is just a trick to format the different variables:\nEnd of explanation"}}},{"rowIdx":2122,"cells":{"Unnamed: 0":{"kind":"number","value":2122,"string":"2,122"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Benchmarking Performance and Scaling of Python Clustering Algorithms\nThere are a host of different clustering algorithms and implementations thereof for Python. The performance and scaling can depend as much on the implementation as the underlying algorithm. Obviously a well written implementation in C or C++ will beat a naive implementation on pure Python, but there is more to it than just that. The internals and data structures used can have a large impact on performance, and can even significanty change asymptotic performance. All of this means that, given some amount of data that you want to cluster your options as to algorithm and implementation maybe significantly constrained. I'm both lazy, and prefer empirical results for this sort of thing, so rather than analyzing the implementations and deriving asymptotic performance numbers for various implementations I'm just going to run everything and see what happens.\nTo begin with we need to get together all the clustering implementations, along with some plotting libraries so we can see what is going on once we've got data. Obviously this is not an exhaustive collection of clustering implementations, so if I've left off your favourite I apologise, but one has to draw a line somewhere.\nThe implementations being test are\nStep1: Now we need some benchmarking code at various dataset sizes. Because some clustering algorithms have performance that can vary quite a lot depending on the exact nature of the dataset we'll also need to run several times on randomly generated datasets of each size so as to get a better idea of the average case performance.\nWe also need to generalise over algorithms which don't necessarily all have the same API. We can resolve that by taking a clustering function, argument tuple and keywords dictionary to let us do semi-arbitrary calls (fortunately all the algorithms do at least take the dataset to cluster as the first parameter).\nFinally some algorithms scale poorly, and I don't want to spend forever doing clustering of random datasets so we'll cap the maximum time an algorithm can use; once it has taken longer than max time we'll just abort there and leave the remaining entries in our datasize by samples matrix unfilled.\nIn the end this all amounts to a fairly straightforward set of nested loops (over datasizes and number of samples) with calls to sklearn to generate mock data and the clustering function inside a timer. Add in some early abort and we're done.\nStep2: Comparison of all ten implementations\nNow we need a range of dataset sizes to test out our algorithm. Since the scaling performance is wildly different over the ten implementations we're going to look at it will be beneficial to have a number of very small dataset sizes, and increasing spacing as we get larger, spanning out to 32000 datapoints to cluster (to begin with). Numpy provides convenient ways to get this done via arange and vector multiplication. We'll start with step sizes of 500, then shift to steps of 1000 past 3000 datapoints, and finally steps of 2000 past 6000 datapoints.\nStep3: Now it is just a matter of running all the clustering algorithms via our benchmark function to collect up all the requsite data. This could be prettier, rolled up into functions appropriately, but sometimes brute force is good enough. More importantly (for me) since this can take a significant amount of compute time, I wanted to be able to comment out algorithms that were slow or I was uninterested in easily. Which brings me to a warning for you the reader and potential user of the notebook\nStep4: Now we need to plot the results so we can see what is going on. The catch is that we have several datapoints for each dataset size and ultimately we would like to try and fit a curve through all of it to get the general scaling trend. Fortunately seaborn comes to the rescue here by providing regplot which plots a regression through a dataset, supports higher order regression (we should probably use order two as most algorithms are effectively quadratic) and handles multiple datapoints for each x-value cleanly (using the x_estimator keyword to put a point at the mean and draw an error bar to cover the range of data).\nStep5: A few features stand out. First of all there appear to be essentially two classes of implementation, with DeBaCl being an odd case that falls in the middle. The fast implementations tend to be implementations of single linkage agglomerative clustering, K-means, and DBSCAN. The slow cases are largely from sklearn and include agglomerative clustering (in this case using Ward instead of single linkage).\nFor practical purposes this means that if you have much more than 10000 datapoints your clustering options are significantly constrained\nStep6: Again we can use seaborn to do curve fitting and plotting, exactly as before.\nStep7: Clearly something has gone woefully wrong with the curve fitting for the scipy single linkage implementation, but what exactly? If we look at the raw data we can see.\nStep8: It seems that at around 44000 points we hit a wall and the runtimes spiked. A hint is that I'm running this on a laptop with 8GB of RAM. Both single linkage algorithms use scipy.spatial.pdist to compute pairwise distances between points, which returns an array of shape (n(n-1)/2, 1) of doubles. A quick computation shows that that array of distances is quite large once we nave 44000 points\nStep9: If we assume that my laptop is keeping much other than that distance array in RAM then clearly we are going to spend time paging out the distance array to disk and back and hence we will see the runtimes increase dramatically as we become disk IO bound. If we just leave off the last element we can get a better idea of the curve, but keep in mind that the scipy single linkage implementation does not scale past a limit set by your available RAM.\nStep10: If we're looking for scaling we can write off the scipy single linkage implementation -- if even we didn't hit the RAM limit the $O(n^2)$ scaling is going to quickly catch up with us. Fastcluster has the same asymptotic scaling, but is heavily optimized to being the constant down much lower -- at this point it is still keeping close to the faster algorithms. It's asymtotics will still catch up with it eventually however.\nIn practice this is going to mean that for larger datasets you are going to be very constrained in what algorithms you can apply\nStep11: Now the some differences become clear. The asymptotic complexity starts to kick in with fastcluster failing to keep up. In turn HDBSCAN and DBSCAN, while having sub-$O(n^2)$ complexity, can't achieve $O(n \\log(n))$ at this dataset dimension, and start to curve upward precipitously. Finally it demonstrates again how much of a difference implementation can make\nStep12: Now we run that for each of our pre-existing datasets to extrapolate out predicted performance on the relevant dataset sizes. A little pandas wrangling later and we've produced a table of roughly how large a dataset you can tackle in each time frame with each implementation. I had to leave out the scipy KMeans timings because the noise in timing results caused the model to be unrealistic at larger data sizes. Note how the $O(n\\log n)$ algorithms utterly dominate here. In the meantime, for medium sizes data sets you can still get quite a lot done with HDBSCAN."},"code_prompt":{"kind":"string","value":"Python Code:\nimport hdbscan\nimport debacl\nimport fastcluster\nimport sklearn.cluster\nimport scipy.cluster\nimport sklearn.datasets\nimport numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nsns.set_context('poster')\nsns.set_palette('Paired', 10)\nsns.set_color_codes()\nExplanation: Benchmarking Performance and Scaling of Python Clustering Algorithms\nThere are a host of different clustering algorithms and implementations thereof for Python. The performance and scaling can depend as much on the implementation as the underlying algorithm. Obviously a well written implementation in C or C++ will beat a naive implementation on pure Python, but there is more to it than just that. The internals and data structures used can have a large impact on performance, and can even significanty change asymptotic performance. All of this means that, given some amount of data that you want to cluster your options as to algorithm and implementation maybe significantly constrained. I'm both lazy, and prefer empirical results for this sort of thing, so rather than analyzing the implementations and deriving asymptotic performance numbers for various implementations I'm just going to run everything and see what happens.\nTo begin with we need to get together all the clustering implementations, along with some plotting libraries so we can see what is going on once we've got data. Obviously this is not an exhaustive collection of clustering implementations, so if I've left off your favourite I apologise, but one has to draw a line somewhere.\nThe implementations being test are:\nSklearn (which implements several algorithms):\nK-Means clustering\nDBSCAN clustering\nAgglomerative clustering\nSpectral clustering\nAffinity Propagation\nScipy (which provides basic algorithms):\nK-Means clustering\nAgglomerative clustering\nFastcluster (which provides very fast agglomerative clustering in C++)\nDeBaCl (Density Based Clustering; similar to a mix of DBSCAN and Agglomerative)\nHDBSCAN (A robust hierarchical version of DBSCAN)\nObviously a major factor in performance will be the algorithm itself. Some algorithms are simply slower -- often, but not always, because they are doing more work to provide a better clustering.\nEnd of explanation\ndef benchmark_algorithm(dataset_sizes, cluster_function, function_args, function_kwds,\n dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=2):\n \n # Initialize the result with NaNs so that any unfilled entries \n # will be considered NULL when we convert to a pandas dataframe at the end\n result = np.nan * np.ones((len(dataset_sizes), sample_size))\n for index, size in enumerate(dataset_sizes):\n for s in range(sample_size):\n # Use sklearns make_blobs to generate a random dataset with specified size\n # dimension and number of clusters\n data, labels = sklearn.datasets.make_blobs(n_samples=size, \n n_features=dataset_dimension, \n centers=dataset_n_clusters)\n \n # Start the clustering with a timer\n start_time = time.time()\n cluster_function(data, *function_args, **function_kwds)\n time_taken = time.time() - start_time\n \n # If we are taking more than max_time then abort -- we don't\n # want to spend excessive time on slow algorithms\n if time_taken > max_time:\n result[index, s] = time_taken\n return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), \n result.flatten()]).T, columns=['x','y'])\n else:\n result[index, s] = time_taken\n \n # Return the result as a dataframe for easier handling with seaborn afterwards\n return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), \n result.flatten()]).T, columns=['x','y'])\nExplanation: Now we need some benchmarking code at various dataset sizes. Because some clustering algorithms have performance that can vary quite a lot depending on the exact nature of the dataset we'll also need to run several times on randomly generated datasets of each size so as to get a better idea of the average case performance.\nWe also need to generalise over algorithms which don't necessarily all have the same API. We can resolve that by taking a clustering function, argument tuple and keywords dictionary to let us do semi-arbitrary calls (fortunately all the algorithms do at least take the dataset to cluster as the first parameter).\nFinally some algorithms scale poorly, and I don't want to spend forever doing clustering of random datasets so we'll cap the maximum time an algorithm can use; once it has taken longer than max time we'll just abort there and leave the remaining entries in our datasize by samples matrix unfilled.\nIn the end this all amounts to a fairly straightforward set of nested loops (over datasizes and number of samples) with calls to sklearn to generate mock data and the clustering function inside a timer. Add in some early abort and we're done.\nEnd of explanation\ndataset_sizes = np.hstack([np.arange(1, 6) * 500, np.arange(3,7) * 1000, np.arange(4,17) * 2000])\nExplanation: Comparison of all ten implementations\nNow we need a range of dataset sizes to test out our algorithm. Since the scaling performance is wildly different over the ten implementations we're going to look at it will be beneficial to have a number of very small dataset sizes, and increasing spacing as we get larger, spanning out to 32000 datapoints to cluster (to begin with). Numpy provides convenient ways to get this done via arange and vector multiplication. We'll start with step sizes of 500, then shift to steps of 1000 past 3000 datapoints, and finally steps of 2000 past 6000 datapoints.\nEnd of explanation\nk_means = sklearn.cluster.KMeans(10)\nk_means_data = benchmark_algorithm(dataset_sizes, k_means.fit, (), {})\ndbscan = sklearn.cluster.DBSCAN(eps=1.25)\ndbscan_data = benchmark_algorithm(dataset_sizes, dbscan.fit, (), {})\nscipy_k_means_data = benchmark_algorithm(dataset_sizes, \n scipy.cluster.vq.kmeans, (10,), {})\nscipy_single_data = benchmark_algorithm(dataset_sizes, \n scipy.cluster.hierarchy.single, (), {})\nfastclust_data = benchmark_algorithm(dataset_sizes, \n fastcluster.linkage_vector, (), {})\nhdbscan_ = hdbscan.HDBSCAN()\nhdbscan_data = benchmark_algorithm(dataset_sizes, hdbscan_.fit, (), {})\ndebacl_data = benchmark_algorithm(dataset_sizes, \n debacl.geom_tree.geomTree, (5, 5), {'verbose':False})\nagglomerative = sklearn.cluster.AgglomerativeClustering(10)\nagg_data = benchmark_algorithm(dataset_sizes, \n agglomerative.fit, (), {}, sample_size=4)\nspectral = sklearn.cluster.SpectralClustering(10)\nspectral_data = benchmark_algorithm(dataset_sizes, \n spectral.fit, (), {}, sample_size=6)\naffinity_prop = sklearn.cluster.AffinityPropagation()\nap_data = benchmark_algorithm(dataset_sizes, \n affinity_prop.fit, (), {}, sample_size=3)\nExplanation: Now it is just a matter of running all the clustering algorithms via our benchmark function to collect up all the requsite data. This could be prettier, rolled up into functions appropriately, but sometimes brute force is good enough. More importantly (for me) since this can take a significant amount of compute time, I wanted to be able to comment out algorithms that were slow or I was uninterested in easily. Which brings me to a warning for you the reader and potential user of the notebook: this next step is very expensive. We are running ten different clustering algorithms multiple times each on twenty two different dataset sizes -- and some of the clustering algorithms are slow (we are capping out at forty five seconds per run). That means that the next cell can take an hour or more to run. That doesn't mean \"Don't try this at home\" (I actually encourage you to try this out yourself and play with dataset parameters and clustering parameters) but it does mean you should be patient if you're going to!\nEnd of explanation\nsns.regplot(x='x', y='y', data=k_means_data, order=2, \n label='Sklearn K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=dbscan_data, order=2, \n label='Sklearn DBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=scipy_k_means_data, order=2, \n label='Scipy K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=hdbscan_data, order=2, \n label='HDBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=fastclust_data, order=2, \n label='Fastcluster Single Linkage', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=scipy_single_data, order=2, \n label='Scipy Single Linkage', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=debacl_data, order=2, \n label='DeBaCl Geom Tree', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=spectral_data, order=2, \n label='Sklearn Spectral', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=agg_data, order=2, \n label='Sklearn Agglomerative', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=ap_data, order=2, \n label='Sklearn Affinity Propagation', x_estimator=np.mean)\nplt.gca().axis([0, 34000, 0, 120])\nplt.gca().set_xlabel('Number of data points')\nplt.gca().set_ylabel('Time taken to cluster (s)')\nplt.title('Performance Comparison of Clustering Implementations')\nplt.legend()\nExplanation: Now we need to plot the results so we can see what is going on. The catch is that we have several datapoints for each dataset size and ultimately we would like to try and fit a curve through all of it to get the general scaling trend. Fortunately seaborn comes to the rescue here by providing regplot which plots a regression through a dataset, supports higher order regression (we should probably use order two as most algorithms are effectively quadratic) and handles multiple datapoints for each x-value cleanly (using the x_estimator keyword to put a point at the mean and draw an error bar to cover the range of data).\nEnd of explanation\nlarge_dataset_sizes = np.arange(1,16) * 4000\nhdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')\nlarge_hdbscan_boruvka_data = benchmark_algorithm(large_dataset_sizes, \n hdbscan_boruvka.fit, (), {}, \n max_time=90, sample_size=1)\nk_means = sklearn.cluster.KMeans(10)\nlarge_k_means_data = benchmark_algorithm(large_dataset_sizes, \n k_means.fit, (), {}, \n max_time=90, sample_size=1)\ndbscan = sklearn.cluster.DBSCAN(eps=1.25, min_samples=5)\nlarge_dbscan_data = benchmark_algorithm(large_dataset_sizes, \n dbscan.fit, (), {}, \n max_time=90, sample_size=1)\nlarge_fastclust_data = benchmark_algorithm(large_dataset_sizes, \n fastcluster.linkage_vector, (), {}, \n max_time=90, sample_size=1)\nlarge_scipy_k_means_data = benchmark_algorithm(large_dataset_sizes, \n scipy.cluster.vq.kmeans, (10,), {}, \n max_time=90, sample_size=1)\nlarge_scipy_single_data = benchmark_algorithm(large_dataset_sizes, \n scipy.cluster.hierarchy.single, (), {}, \n max_time=90, sample_size=1)\nExplanation: A few features stand out. First of all there appear to be essentially two classes of implementation, with DeBaCl being an odd case that falls in the middle. The fast implementations tend to be implementations of single linkage agglomerative clustering, K-means, and DBSCAN. The slow cases are largely from sklearn and include agglomerative clustering (in this case using Ward instead of single linkage).\nFor practical purposes this means that if you have much more than 10000 datapoints your clustering options are significantly constrained: sklearn spectral, agglomerative and affinity propagation are going to take far too long. DeBaCl may still be an option, but given that the hdbscan library provides \"robust single linkage clustering\" equivalent to what DeBaCl is doing (and with effectively the same runtime as hdbscan as it is a subset of that algorithm) it is probably not the best choice for large dataset sizes.\nSo let's drop out those slow algorithms so we can scale out a little further and get a closer look at the various algorithms that managed 32000 points in under thirty seconds. There is almost undoubtedly more to learn as we get ever larger dataset sizes.\nComparison of fast implementations\nLet's compare the six fastest implementations now. We can scale out a little further as well; based on the curves above it looks like we should be able to comfortably get to 60000 data points without taking much more than a minute per run. We can also note that most of these implementations weren't that noisy so we can get away with a single run per dataset size.\nEnd of explanation\nsns.regplot(x='x', y='y', data=large_k_means_data, order=2, \n label='Sklearn K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_dbscan_data, order=2, \n label='Sklearn DBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2, \n label='Scipy K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2, \n label='HDBSCAN Boruvka', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_fastclust_data, order=2, \n label='Fastcluster Single Linkage', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_scipy_single_data, order=2, \n label='Scipy Single Linkage', x_estimator=np.mean)\nplt.gca().axis([0, 64000, 0, 150])\nplt.gca().set_xlabel('Number of data points')\nplt.gca().set_ylabel('Time taken to cluster (s)')\nplt.title('Performance Comparison of Fastest Clustering Implementations')\nplt.legend()\nExplanation: Again we can use seaborn to do curve fitting and plotting, exactly as before.\nEnd of explanation\nlarge_scipy_single_data.tail(10)\nExplanation: Clearly something has gone woefully wrong with the curve fitting for the scipy single linkage implementation, but what exactly? If we look at the raw data we can see.\nEnd of explanation\nsize_of_array = 44000 * (44000 - 1) / 2 # from pdist documentation\nbytes_in_array = size_of_array * 8 # Since doubles use 8 bytes\ngigabytes_used = bytes_in_array / (1024.0 ** 3) # divide out to get the number of GB\ngigabytes_used\nExplanation: It seems that at around 44000 points we hit a wall and the runtimes spiked. A hint is that I'm running this on a laptop with 8GB of RAM. Both single linkage algorithms use scipy.spatial.pdist to compute pairwise distances between points, which returns an array of shape (n(n-1)/2, 1) of doubles. A quick computation shows that that array of distances is quite large once we nave 44000 points:\nEnd of explanation\nsns.regplot(x='x', y='y', data=large_k_means_data, order=2, \n label='Sklearn K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_dbscan_data, order=2, \n label='Sklearn DBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2, \n label='Scipy K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2, \n label='HDBSCAN Boruvka', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_fastclust_data, order=2, \n label='Fastcluster Single Linkage', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=large_scipy_single_data[:8], order=2, \n label='Scipy Single Linkage', x_estimator=np.mean)\nplt.gca().axis([0, 64000, 0, 150])\nplt.gca().set_xlabel('Number of data points')\nplt.gca().set_ylabel('Time taken to cluster (s)')\nplt.title('Performance Comparison of Fastest Clustering Implementations')\nplt.legend()\nExplanation: If we assume that my laptop is keeping much other than that distance array in RAM then clearly we are going to spend time paging out the distance array to disk and back and hence we will see the runtimes increase dramatically as we become disk IO bound. If we just leave off the last element we can get a better idea of the curve, but keep in mind that the scipy single linkage implementation does not scale past a limit set by your available RAM.\nEnd of explanation\nhuge_dataset_sizes = np.arange(1,11) * 20000\nk_means = sklearn.cluster.KMeans(10)\nhuge_k_means_data = benchmark_algorithm(huge_dataset_sizes, \n k_means.fit, (), {}, \n max_time=120, sample_size=2, dataset_dimension=10)\ndbscan = sklearn.cluster.DBSCAN(eps=1.5)\nhuge_dbscan_data = benchmark_algorithm(huge_dataset_sizes, \n dbscan.fit, (), {},\n max_time=120, sample_size=2, dataset_dimension=10)\nhuge_scipy_k_means_data = benchmark_algorithm(huge_dataset_sizes, \n scipy.cluster.vq.kmeans, (10,), {}, \n max_time=120, sample_size=2, dataset_dimension=10)\nhdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')\nhuge_hdbscan_data = benchmark_algorithm(huge_dataset_sizes, \n hdbscan_boruvka.fit, (), {}, \n max_time=240, sample_size=4, dataset_dimension=10)\nhuge_fastcluster_data = benchmark_algorithm(huge_dataset_sizes, \n fastcluster.linkage_vector, (), {}, \n max_time=240, sample_size=2, dataset_dimension=10)\nsns.regplot(x='x', y='y', data=huge_k_means_data, order=2, \n label='Sklearn K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=huge_dbscan_data, order=2, \n label='Sklearn DBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=huge_scipy_k_means_data, order=2, \n label='Scipy K-Means', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=huge_hdbscan_data, order=2, \n label='HDBSCAN', x_estimator=np.mean)\nsns.regplot(x='x', y='y', data=huge_fastcluster_data, order=2, \n label='Fastcluster', x_estimator=np.mean)\nplt.gca().axis([0, 200000, 0, 240])\nplt.gca().set_xlabel('Number of data points')\nplt.gca().set_ylabel('Time taken to cluster (s)')\nplt.title('Performance Comparison of K-Means and DBSCAN')\nplt.legend()\nExplanation: If we're looking for scaling we can write off the scipy single linkage implementation -- if even we didn't hit the RAM limit the $O(n^2)$ scaling is going to quickly catch up with us. Fastcluster has the same asymptotic scaling, but is heavily optimized to being the constant down much lower -- at this point it is still keeping close to the faster algorithms. It's asymtotics will still catch up with it eventually however.\nIn practice this is going to mean that for larger datasets you are going to be very constrained in what algorithms you can apply: if you get enough datapoints only K-Means, DBSCAN, and HDBSCAN will be left. This is somewhat disappointing, paritcularly as K-Means is not a particularly good clustering algorithm, paricularly for exploratory data analysis.\nWith this in mind it is worth looking at how these last several implementations perform at much larger sizes, to see, for example, when fastscluster starts to have its asymptotic complexity start to pull it away.\nComparison of high performance implementations\nAt this point we can scale out to 200000 datapoints easily enough, so let's push things at least that far so we can start to really see scaling effects.\nEnd of explanation\nimport statsmodels.formula.api as sm\ntime_samples = [1000, 2000, 5000, 10000, 25000, 50000, 75000, 100000, 250000, 500000, 750000,\n 1000000, 2500000, 5000000, 10000000, 50000000, 100000000, 500000000, 1000000000]\ndef get_timing_series(data, quadratic=True):\n if quadratic:\n data['x_squared'] = data.x**2\n model = sm.ols('y ~ x + x_squared', data=data).fit()\n predictions = [model.params.dot([1.0, i, i**2]) for i in time_samples]\n return pd.Series(predictions, index=pd.Index(time_samples))\n else: # assume n log(n)\n data['xlogx'] = data.x * np.log(data.x)\n model = sm.ols('y ~ x + xlogx', data=data).fit()\n predictions = [model.params.dot([1.0, i, i*np.log(i)]) for i in time_samples]\n return pd.Series(predictions, index=pd.Index(time_samples))\nExplanation: Now the some differences become clear. The asymptotic complexity starts to kick in with fastcluster failing to keep up. In turn HDBSCAN and DBSCAN, while having sub-$O(n^2)$ complexity, can't achieve $O(n \\log(n))$ at this dataset dimension, and start to curve upward precipitously. Finally it demonstrates again how much of a difference implementation can make: the sklearn implementation of K-Means is far better than the scipy implementation. Since HDBSCAN clustering is a lot better than K-Means (unless you have good reasons to assume that the clusters partition your data and are all drawn from Gaussian distributions) and the scaling is still pretty good I would suggest that unless you have a truly stupendous amount of data you wish to cluster then the HDBSCAN implementation is a good choice.\nBut should I get a coffee?\nSo we know which implementations scale and which don't; a more useful thing to know in practice is, given a dataset, what can I run interactively? What can I run while I go and grab some coffee? How about a run over lunch? What if I'm willing to wait until I get in tomorrow morning? Each of these represent significant breaks in productivity -- once you aren't working interactively anymore your productivity drops measurably, and so on.\nWe can build a table for this. To start we'll need to be able to approximate how long a given clustering implementation will take to run. Fortunately we already gathered a lot of that data; if we load up the statsmodels package we can fit the data (with a quadratic or $n\\log n$ fit depending on the implementation; DBSCAN and HDBSCAN get caught here, since while they are under $O(n^2)$ scaling, they don't have an easily described model, so I'll model them as $n^2$ for now) and use the resulting model to make our predictions. Obviously this has some caveats: if you fill your RAM with a distance matrix your runtime isn't going to fit the curve.\nI've hand built a time_samples list to give a reasonable set of potential data sizes that are nice and human readable. After that we just need a function to fit and build the curves.\nEnd of explanation\nap_timings = get_timing_series(ap_data)\nspectral_timings = get_timing_series(spectral_data)\nagg_timings = get_timing_series(agg_data)\ndebacl_timings = get_timing_series(debacl_data)\nfastclust_timings = get_timing_series(large_fastclust_data.ix[:10,:].copy())\nscipy_single_timings = get_timing_series(large_scipy_single_data.ix[:10,:].copy())\nhdbscan_boruvka = get_timing_series(huge_hdbscan_data, quadratic=True)\n#scipy_k_means_timings = get_timing_series(huge_scipy_k_means_data, quadratic=False)\ndbscan_timings = get_timing_series(huge_dbscan_data, quadratic=True)\nk_means_timings = get_timing_series(huge_k_means_data, quadratic=False)\ntiming_data = pd.concat([ap_timings, spectral_timings, agg_timings, debacl_timings, \n scipy_single_timings, fastclust_timings, hdbscan_boruvka, \n dbscan_timings, k_means_timings\n ], axis=1)\ntiming_data.columns=['AffinityPropagation', 'Spectral', 'Agglomerative',\n 'DeBaCl', 'ScipySingleLinkage', 'Fastcluster',\n 'HDBSCAN', 'DBSCAN', 'SKLearn KMeans'\n ]\ndef get_size(series, max_time):\n return series.index[series < max_time].max()\ndatasize_table = pd.concat([\n timing_data.apply(get_size, max_time=30),\n timing_data.apply(get_size, max_time=300),\n timing_data.apply(get_size, max_time=3600),\n timing_data.apply(get_size, max_time=8*3600)\n ], axis=1)\ndatasize_table.columns=('Interactive', 'Get Coffee', 'Over Lunch', 'Overnight')\ndatasize_table\nExplanation: Now we run that for each of our pre-existing datasets to extrapolate out predicted performance on the relevant dataset sizes. A little pandas wrangling later and we've produced a table of roughly how large a dataset you can tackle in each time frame with each implementation. I had to leave out the scipy KMeans timings because the noise in timing results caused the model to be unrealistic at larger data sizes. Note how the $O(n\\log n)$ algorithms utterly dominate here. In the meantime, for medium sizes data sets you can still get quite a lot done with HDBSCAN.\nEnd of explanation"}}},{"rowIdx":2123,"cells":{"Unnamed: 0":{"kind":"number","value":2123,"string":"2,123"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Calculates and plots the NIWA SOI\nThe NIWA SOI is calculated using the Troup method, where the climatological period is taken to be 1941-2010\nStep1: imports\nStep2: defines a function to get the BoM SLP data for Tahiti or Darwin\nStep3: set up the paths\nStep4: set up the plotting parameters\nStep5: set up proxies\nStep6: preliminary\nStep7: Get the data for Tahiti\nStep8: Get the data for Darwin\nStep9: defines climatological period here\nStep10: calculates the climatology\nStep11: Calculates the SOI\nStep12: writes the CSV file\nStep13: stacks everything and set a Datetime index\nStep14: choose the period of display\nStep15: 3 months rolling mean, and some data munging\nStep16: plots the SOI, lots of boilerplate here\nStep17: saves the figure"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nExplanation: Calculates and plots the NIWA SOI\nThe NIWA SOI is calculated using the Troup method, where the climatological period is taken to be 1941-2010:\nThus, if T and D are the monthly pressures at Tahiti and Darwin, respectively, and Tc and Dc the climatological monthly pressures, then:\nSOI = [ (T – Tc) – (D – Dc) ] / [ StDev (T – D) ]\nSo the numerator is the anomalous Tahiti-Darwin difference for the month in question, and the denominator is the standard deviation of\nthe Tahiti-Darwin differences for that month over the 1941-2010 climatological period. I then round the answer to the nearest tenth\n(ie, 1 decimal place).\nEnd of explanation\nimport os\nimport sys\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy import ma\nimport urllib2\nimport requests\nfrom matplotlib.dates import YearLocator, MonthLocator, DateFormatter\nfrom dateutil import parser as dparser\nfrom datetime import datetime, timedelta\nimport subprocess\nExplanation: imports\nEnd of explanation\ndef get_BOM_MSLP(station='tahiti'):\n url = \"ftp://ftp.bom.gov.au/anon/home/ncc/www/sco/soi/{}mslp.html\".format(station)\n r = urllib2.urlopen(url)\n if r.code == 200:\n print(\"streaming MSLP data for {} successful\\n\".format(station))\n else:\n print(\"!!! unable to stream MSLP data for {}\\n\".format(station))\n sys.exit(1)\n data = r.readlines()\n r.close()\n fout = open('./{}_text'.format(station), 'w')\n if station == 'tahiti':\n data = data[15:-3]\n else:\n data = data[14:-3]\n fout.writelines(data)\n fout.close()\n data = pd.read_table('./{}_text'.format(station),sep='\\s*', \\\n engine='python', na_values='*', index_col=['Year'])\n subprocess.Popen([\"rm {}*\".format(station)], shell=True, stdout=True).communicate()\n return data\nExplanation: defines a function to get the BoM SLP data for Tahiti or Darwin\nEnd of explanation\n# figure\nfpath = os.path.join(os.environ['HOME'], 'operational/ICU/indices/figures')\n# csv file\nopath = os.path.join(os.environ['HOME'], 'operational/ICU/indices/data')\nExplanation: set up the paths\nEnd of explanation\nyears = YearLocator()\nmonths = MonthLocator()\nmFMT = DateFormatter('%b')\nyFMT = DateFormatter('\\n\\n%Y')\nmpl.rcParams['xtick.labelsize'] = 12\nmpl.rcParams['ytick.labelsize'] = 12\nmpl.rcParams['axes.titlesize'] = 14\nmpl.rcParams['xtick.direction'] = 'out'\nmpl.rcParams['ytick.direction'] = 'out'\nmpl.rcParams['xtick.major.size'] = 5\nmpl.rcParams['ytick.major.size'] = 5\nmpl.rcParams['xtick.minor.size'] = 2\nExplanation: set up the plotting parameters\nEnd of explanation\nproxies = {}\n#proxies['http'] = 'url:port'\n#proxies['https'] = 'url:port'\n#proxies['ftp'] = 'url:port'\n### use urllib2 to open remote http files\nurllib2proxy = urllib2.ProxyHandler(proxies)\nopener = urllib2.build_opener(urllib2proxy)\nurllib2.install_opener(opener)\nExplanation: set up proxies\nEnd of explanation\nurl = \"http://www.bom.gov.au/climate/current/soihtm1.shtml\"\nr = requests.get(url, proxies=proxies)\nurlcontent = r.content\ndate_update = urlcontent[urlcontent.find(\"Next SOI update expected:\"):\\\n urlcontent.find(\"Next SOI update expected:\")+60]\ndate_update = date_update.split(\"\\n\")[0]\nprint date_update\nprint(10*'='+'\\n')\nExplanation: preliminary: wet get the date for which the next update is likely to be made available\nEnd of explanation\ntahitidf = get_BOM_MSLP(station='tahiti')\nExplanation: Get the data for Tahiti\nEnd of explanation\ndarwindf = get_BOM_MSLP(station='darwin')\nExplanation: Get the data for Darwin\nEnd of explanation\nclim_start = 1941\nclim_end = 2010\nclim = \"{}_{}\".format(clim_start, clim_end)\nExplanation: defines climatological period here\nEnd of explanation\ntahiti_cli = tahitidf.loc[clim_start:clim_end,:]\ndarwin_cli = darwindf.loc[clim_start:clim_end,:]\ntahiti_mean = tahiti_cli.mean(0)\ndarwin_mean = darwin_cli.mean(0)\nExplanation: calculates the climatology\nEnd of explanation\nsoi = ((tahitidf - tahiti_mean) - (darwindf - darwin_mean)) / ((tahiti_cli - darwin_cli).std(0))\nsoi = np.round(soi, 1)\nsoi.tail()\nExplanation: Calculates the SOI\nEnd of explanation\nsoi.to_csv(os.path.join(opath, \"NICO_NIWA_SOI_{}.csv\".format(clim)))\nExplanation: writes the CSV file\nEnd of explanation\nts_soi = pd.DataFrame(soi.stack())\ndates = []\nfor i in xrange(len(ts_soi)):\n dates.append(dparser.parse(\"{}-{}-1\".format(ts_soi.index.get_level_values(0)[i], ts_soi.index.get_level_values(1)[i])))\nts_soi.index = dates\nts_soi.columns = [['soi']]\nts_soi.tail()\nExplanation: stacks everything and set a Datetime index\nEnd of explanation\nts_soi = ts_soi.truncate(before=\"2012/1/1\")\nExplanation: choose the period of display\nEnd of explanation\nts_soi[['soirm']] = pd.rolling_mean(ts_soi, 3, center=True)\ndates = np.array(ts_soi.index.to_pydatetime())\nwidths=np.array([(dates[j+1]-dates[j]).days for j in range(len(dates)-1)] + [30])\n### middle of the month for the 3 month running mean plot\ndatesrm = np.array([x + timedelta(days=15) for x in dates])\nsoi = ts_soi['soi'].values\nsoim = ts_soi['soirm'].values\nExplanation: 3 months rolling mean, and some data munging\nEnd of explanation\nfig, ax = plt.subplots(figsize=(14,7))\nfig.subplots_adjust(bottom=0.15)\nax.bar(dates[soi>=0],soi[soi>=0], width=widths[soi>=0], facecolor='steelblue', \\\n alpha=.8, edgecolor='steelblue', lw=2)\nax.bar(dates[soi<0],soi[soi<0], width=widths[soi<0], facecolor='coral', \\\n alpha=.8, edgecolor='coral', lw=2)\nax.plot(datesrm,soim, lw=3, color='k', label='3-mth mean')\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_formatter(mFMT)\nax.xaxis.set_major_formatter(yFMT)\nax.axhline(0, color='k')\n#ax.set_frame_on(False)\nlabels = ax.get_xminorticklabels()\nfor label in labels:\n label.set_fontsize(14)\n label.set_rotation(90)\nlabels = ax.get_xmajorticklabels()\nfor label in labels:\n label.set_fontsize(18)\nlabels = ax.get_yticklabels()\nfor label in labels:\n label.set_fontsize(18)\nax.grid(linestyle='--')\nax.xaxis.grid(True, which='both')\nax.legend(loc=3, fancybox=True)\nax.set_ylim(-3., 3.)\nax.set_ylabel('Monthly SOI (NIWA)', fontsize=14, backgroundcolor=\"w\")\nax.text(dates[0],3.2,\"NIWA SOI\", fontsize=24, fontweight='bold')\nax.text(dates[-5], 2.8, \"%s NIWA Ltd.\" % (u'\\N{Copyright Sign}'))\ntextBm = \"%s = %+4.1f\" % (dates[-1].strftime(\"%B %Y\"), soi[-1])\ntextBs = \"%s to %s = %+4.1f\" % (dates[-3].strftime(\"%b %Y\"), dates[-1].strftime(\"%b %Y\"), soi[-3:].mean())\nax.text(datesrm[8],3.2,\"Latest values: %s, %s\" % (textBm, textBs), fontsize=16)\nax.text(datesrm[0],2.8,date_update, fontsize=14)\nExplanation: plots the SOI, lots of boilerplate here\nEnd of explanation\nfig.savefig(os.path.join(fpath, \"NICO_NIWA_SOI_{}clim.png\".format(clim)), dpi=200)\nExplanation: saves the figure\nEnd of explanation"}}},{"rowIdx":2124,"cells":{"Unnamed: 0":{"kind":"number","value":2124,"string":"2,124"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n \"Open\nTFX Feature Selection Component\nYou may find the source code for the same here\nThis example demonstrate the use of feature selection component. This project allows the user to select different algorithms for performing feature selection on datasets artifacts in TFX pipelines\nBase code taken from: https://github.com/tensorflow/tfx/blob/master/docs/tutorials/tfx/components_keras.ipynb\nSetup\nInstall TFX\nNote: In Google Colab, because of package updates, the first time you run this cell you must restart the runtime (Runtime > Restart runtime ...).\nEnd of explanation\nimport os\nimport pprint\nimport tempfile\nimport urllib\nimport absl\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\ntf.get_logger().propagate = False\nimport importlib\npp = pprint.PrettyPrinter()\nfrom tfx import v1 as tfx\nimport importlib\nfrom tfx.components import CsvExampleGen\nfrom tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\n%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip\n# importing the feature selection component\nfrom component import FeatureSelection\n# This is the root directory for your TFX pip package installation.\n_tfx_root = tfx.__path__[0]\nExplanation: Import packages\nImporting the necessary packages, including the standard TFX component classes\nEnd of explanation\n# getting the dataset\n_data_root = tempfile.mkdtemp(prefix='tfx-data')\nDATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/penguin/data/labelled/penguins_processed.csv'\n \n_data_filepath = os.path.join(_data_root, \"data.csv\")\nurllib.request.urlretrieve(DATA_PATH, _data_filepath)\nExplanation: Palmer Penguins example pipeline\nDownload Example Data\nWe download the example dataset for use in our TFX pipeline.\nThe dataset we're using is the Palmer Penguins dataset which is also used in other\nTFX examples.\nThere are four numeric features in this dataset:\nculmen_length_mm\nculmen_depth_mm\nflipper_length_mm\nbody_mass_g\nAll features were already normalized to have range [0,1]. We will build a\nthat selects 2 features to be eliminated from the dataset in other to improve the performance of the mode in predicting the species of penguins.\nEnd of explanation\ncontext = InteractiveContext()\n#create and run exampleGen component\nexample_gen = CsvExampleGen(input_base=_data_root )\ncontext.run(example_gen)\n#create and run statisticsGen component\nstatistics_gen = tfx.components.StatisticsGen(\n examples=example_gen.outputs['examples'])\ncontext.run(statistics_gen)\n# using the feature selection component\n#feature selection component\nfeature_selector = FeatureSelection(orig_examples = example_gen.outputs['examples'],\n module_file='example.modules.penguins_module')\ncontext.run(feature_selector)\n# Display Selected Features\ncontext.show(feature_selector.outputs['feature_selection']._artifacts[0])\nExplanation: Run TFX Components\nIn the cells that follow, we create TFX components one-by-one and generates example using exampleGen component.\nEnd of explanation\ncontext.show(feature_selector.outputs['updated_data']._artifacts[0])\nExplanation: As seen above, .selected_features contains the features selected after running the component with the speified parameters.\nTo get the info about updated Example artifact, one can view it as follows:\nEnd of explanation"}}},{"rowIdx":2125,"cells":{"Unnamed: 0":{"kind":"number","value":2125,"string":"2,125"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n There are many talks tomorrow at the CSV Conf. I want to cluster the talks\nStep1: Document representation\nStep2: Preprocess text\nStep3: Cluster the talks\nI refer to Jörn Hees (2015) to generate the hierarchical clustering and dendrogram using scipy.cluster.hierarchy.dendrogram."},"code_prompt":{"kind":"string","value":"Python Code:\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nwebsite_to_parse = \"https://csvconf.com/speakers/\"\n# Save HTML to soup\nhtml_data = requests.get(website_to_parse).text\nsoup = BeautifulSoup(html_data, \"html5lib\")\ndoc = soup.find_all(\"table\", attrs={\"class\", \"speakers\"})[1]\nnames = doc.find_all(\"span\", attrs={\"class\": \"name\"})\nnames = [t.getText().strip() for t in names]\ntitles = doc.find_all(\"p\", attrs={\"class\": \"title\"})\ntitles = [t.getText().strip() for t in titles]\nabstracts = doc.find_all(\"p\", attrs={\"class\": \"abstract\"})\nabstracts = [t.getText().strip() for t in abstracts]\nprint(len(names), len(titles), len(abstracts))\nExplanation: There are many talks tomorrow at the CSV Conf. I want to cluster the talks:\nGet html\nGet talk titles\nMatch titles with description (to get more text)\nModel with TF-IDF\nFind clusters\nGet HTML\nEnd of explanation\ndf = pd.DataFrame.from_dict({\n 'names':names,\n 'titles':titles,\n 'abstracts':abstracts})\n# Combine text of title and abstract\ndf['document'] = df['titles'] + \" \" + df['abstracts']\n# Add index\ndf['index'] = df.index\nExplanation: Document representation\nEnd of explanation\nimport sys\nsys.path.append(\"/Users/csiu/repo/kick/src/python\")\nimport sim_doc as sim_doc\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.utils.extmath import randomized_svd\n## Preprocess\n_ = sim_doc.preprocess_data(df)\n## TF-IDF\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(df['doc_processed'])\nExplanation: Preprocess text\nEnd of explanation\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram, linkage\n# generate the linkage matrix\nZ = linkage(X.toarray(), 'ward')\n# calculate full dendrogram\nplt.figure(figsize=(25, 4))\nplt.title('Hierarchical Clustering of CSV,Conf,V3 Non-Keynote talks')\nplt.xlabel('')\nplt.ylabel('Distance')\ndn = dendrogram(\n Z,\n leaf_rotation=270, # rotates the x axis labels\n leaf_font_size=12, # font size for the x axis labels\n labels = df[\"titles\"].tolist(),\n color_threshold=1.45, # where to cut for clusters\n above_threshold_color='#bcbddc'\n)\nplt.show()\nExplanation: Cluster the talks\nI refer to Jörn Hees (2015) to generate the hierarchical clustering and dendrogram using scipy.cluster.hierarchy.dendrogram.\nEnd of explanation"}}},{"rowIdx":2126,"cells":{"Unnamed: 0":{"kind":"number","value":2126,"string":"2,126"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Chapter 33. Nonparametric permutation testing\nStep1: Figure 33.1\nStep2: 33.3\nUsing the same fig/data as 33.1\nStep3: 33.5/6\nThese are generated in chap 34.\n33.8\nStep5: 33.9\nRather than do perm testing on the spectrogram I'll just write the code below using the data we generated above."},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nfrom scipy.signal import convolve2d\nimport skimage.measure\nExplanation: Chapter 33. Nonparametric permutation testing\nEnd of explanation\nx = np.arange(-5,5, .01)\npdf = norm.pdf(x)\ndata = np.random.randn(1000)\nfig, ax = plt.subplots(1,2, sharex='all')\nax[0].plot(x, pdf)\nax[0].set(ylabel='PDF', xlabel='Statistical value')\nax[1].hist(data, bins=50)\nax[1].set(ylabel='counts')\nfig.tight_layout()\nExplanation: Figure 33.1\nEnd of explanation\nprint(f'p_n = {sum(data>2)/1000:.3f}')\nprint(f'p_z = {1-norm.cdf(2):.3f}')\nExplanation: 33.3\nUsing the same fig/data as 33.1\nEnd of explanation\nnp.random.seed(1)\n# create random smoothed map\nxi, yi = np.meshgrid(np.arange(-10, 11), np.arange(-10, 11))\nzi = xi**2 + yi**2\nzi = 1 - (zi/np.max(zi))\nmap = convolve2d(np.random.randn(100,100), zi,'same')\n# threshold at arb value\nmapt = map.copy()\nmapt[(np.abs(map) point\n gtree = model.df.loc[model.df.start >= point, \"genealogy\"].iloc[0]\n gtrees.append(gtree)\nimport ipyrad.analysis as ipa\nast = ipa.astral(gtrees)\nast.run()\nast.tree.draw();\nExplanation: Infer a species tree from TRUE gene trees\nEnd of explanation\n# get two toytrees to compare\ntree1 = toytree.tree(model.df.genealogy[0])\ntree2 = toytree.tree(model.df.genealogy[100])\n# calculate normalized RF distance\nrf, rfmax, _, _, _, _, _ = tree1.treenode.robinson_foulds(tree2.treenode)\nprint(rf, rfmax, rf / rfmax)\n# unresolved tree example RF calc\nunresolved = tree1.collapse_nodes(min_dist=5e6)\nrf, rfmax, _, _, _, _, _ = unresolved.treenode.robinson_foulds(tree2.treenode, unrooted_trees=True)\nprint(rf, rfmax, rf / rfmax)\nExplanation: Measure RF distance between trees\nThe normalized RF distance. Larger value means trees are more different.\nEnd of explanation\nchrom ----------------------------------------------------------------\nwindows --------- ---------- ------------\nRAD loc - - - - - - \ngt erro --- --- --- ---\n# separate figure\nwindowsize x spptree error (astral)\nExplanation: Visualize gene tree error\nSome kind of sliding plot ...\nEnd of explanation"}}},{"rowIdx":2130,"cells":{"Unnamed: 0":{"kind":"number","value":2130,"string":"2,130"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Table of Contents\n

Aufgabe 1\n

Ersetzen Sie eine Reihe von Worten durch eine Reihe von Zahlen, die die Anzahl der Vokale anzeigen. Z.B.\nStep10: prozedurale Schreibweise\nStep11: Aufgabe 2\nVerwenden Sie map() um in einer Liste von Worten jedes Wort in Großbuchstaben auszugeben. Diskutieren Sie evtl. Probleme mit einem Nachbarn. \nAufgabe 3 (optional)\nLösen Sie Aufgabe 1 mit map()\nfilter()\nfilter(FunktionX, Liste)
\nDie Funktion FunktionX wird auf jedes Element der Liste angewandt. Konstruiert einen neuen Iterator, in den die Elemente der Liste aufgenommen werden, für die die FunktionX den Ausgabewert True hat. \n
Bsp.\nStep12: Aufgabe 4\nVerwenden Sie filter, um aus dem folgenden Text eine Wortliste zu erstellen, in der alle Pronomina, Artikel und die Worte \"dass\", \"ist\", \"nicht\", \"auch\", \"und\" nicht enthalten sind\nStep13: itertools.repeat(iterator, [n]) wiederholt die Elemente in iterator n mal.\nStep14: itertools.chain(iterator_1, iterator_2, ...) Erzeugt einen neuen Iterator, in dem die Elemente von iterator_1, _2 usw. aneinander gehängt sind.\nStep15: Aufgabe 5\nVerknüpfen Sie den Inhalt dreier Dateien zu einem Iterator\nTeile der Ausgabe eines Iterators auswählen.\nitertools.filterfalse(Prädikat, iterator) ist das Gegenstück zu filter(). Ausgabe enthält alle Elemente, für die das Prädikat falsch ist.\nitertools.takewhile(Prädikat, iterator) - gibt solange Elemente aus, wie das Prädikat wahr ist\nitertools.dropwhile(Prädikat, iter)entfernt alle Elemente, solange das Prädikat wahr ist. Gibt dann den Rest aus.\nitertools.compress(Daten, Selektoren) Nimmt zweei Iteratoren un dgibt nur die Elemente des ersten (Daten) zurück, für die das entsprechende Element im zweiten (Selektoren) wahr ist. Stoppt, wenn einer der Iteratoren erschöpft ist.\nIteratoren kombinieren\nitertools.combinations(Iterator, r) gibt alle r-Tuple Kombinationen der Elemente des Iterators wieder. Beispiel\nStep16: itertools.permutations(iterator, r) gibt alle Permutationen aller Elemente unabhängig von der Reihenfolge in Iterator wieder\nStep17: Aufgabe 7\nWieviele Zweier-Permutationen sind mit den graden Zahlen zwischen 1 und 101 möglich? \nThe operator module\nMathematische Operationen\nStep18: Lambda-Funktionen\nlambda erlaubt es, kleine Funktionen anonym zu definieren. Nehmen wir an, wir wollen in einer List von Zahlen alle Zahlen durch 100 teilen und mit 13 multiplizieren. Dann könnten wir das so machen\nStep19: Diese Funktion können wir mit Lambda nun direkt einsetzen\nStep20: Allerdings gibt es sehr unterschiedliche Meinungen darüber, ob auf diese Weise guter Code entsteht. Ich finde diesen Ratschlag anz gut\nStep21:
\n




\nAufgabe 2\nStep22:







\nAufgabe 3\nStep24:







\nAufgabe 4"},"code_prompt":{"kind":"string","value":"Python Code:\n#beispiel\na = [1, 2, 3,]\nmy_iterator = iter(a)\nmy_iterator.__next__()\nmy_iterator.__next__()\nExplanation: Table of Contents\n

\n## Python für Fortgeschrittene 2\n### Funktionales Programmieren I\n#### Typen von Programmiersprachen:\n
    \n
  • Prozedural
    \nProgramm besteht aus einer Liste von Anweisungen, die sequentiell abgearbeitet werden. Die meisten Programmiersprachen sind prozedural, z.B. C.
  • \n
  • Deklarativ
    \nIm Programm wird nur spezifiziert, welches Problem gelöst werden soll, der Interpreter setzt dies dann in Anweisungen um, z.B. SQL
  • \n
  • Objekt-orientiert
    \nProgramme erzeugen und verwenden Objekte und manipulieren diese Objekte. Objekte haben interne Zustände, die durch Methoden gesetzt werden, z.B. Java, C++.
  • \n
  • Funktional
    \nZerlegen ein Problem in eine Reihe von Funktionen (vergleichbar mit mathematischen Funktionen, z.B. f(x) = y. Die Funktionen haben einen definierten Input und Output, aber keine internen Zustand, der die Ausgabe eines bestimmten Input beeinflusst, z.B. Lisp oder Haskell.
  • \n
\n#### Weitere Merkmale des funktionalen Programmierens:\n
    \n
  • Funktionen können wie Daten behandelt werden, d.h. man kann einer Funktion als Parameter eine Funktion geben bzw. die Ausgabe einer Funktion kann eine Funktion sein.
  • \n
  • Rekursion ist die primäre Form der Ablaufkontrolle, etwa um Schleifen zu erzeugen.
  • \n
  • Im Zentrum steht die Manipulation von Listen.
  • \n
  • 'Reine' funktionale Programmiersprachen vermeiden Nebeneffekte, z.B. einer Variablen erst einen Wert und dann einen anderen zuzuweisen, um so den internen Zustand des Programms zu verfolgen. Einige Funktionen werden aber nur wegen ihrer 'Nebeneffekte' aufgerufen, z.B. print() oder time.sleep() und nicht für die Rückgabewerte der Funktion.
  • \n
  • Funktionale Programmiersprachen vermeiden Zuweisungen und arbeiten stattdessen mit Ausdrücken, also mit Funktionen, die Parameter haben und eine Ausgabe. Im Idealfall besteht das ganze Programm aus einer Folge von Funktionen, wobei die Ausgabe der einen Funktion zum Parameter der nächsten wird usw., z.B.:
    \na = 3
    \nfunc3(func2(func1(a)))
    \n
  • Funktionale Programmiersprachen verwenden vor allem Funktionen, die auf anderen Funktionen arbeiten, die auf anderen Funktionen arbeiten.\n
\n#### Vorteile des funktionalen Programmierens:\n
    \n
  • Formale Beweisbarkeit (eher von akademischem Interesse
  • \n
  • Modularität
    \nFunktionales Programmieren erzwingt das Schreiben von sehr kleinen Funktionen, die leichter wiederzuverwenden und modular einzusetzen sind.
  • \n
  • Einfachheit der Fehlersuche und des Testens
    \nDa Ein- und Ausgabe stets klar definiert sind, sind Fehlersuche und das Erstellen von Unittests einfacher
  • \n
\nWie immer gilt in Python auch hier: Python ermöglicht die Verwendung des funktionalen Paradigmas, erzwingt es aber nicht durch Einschränkungen, wie es reine funktionale Programmiersprachen tun. Typischerweise verwendet man in Python prozedurale, objekt-orientierte und funktionale Verfahren, z.B. kann man objekt-orientiertes und funktionales Programmieren verwenden, indem man Funktionen definiert, die als Ein- und Ausgabe Objekte verwenden.\nIn Python wird das funktionale Programmieren u.a. durch folgende Komponenten realisiert:\n
    \n
  • Iteratoren
  • \n
  • List Comprehension, Generator Expressions
  • \n
  • Die Funktionen map(), filter()
  • \n
  • Das itertools Modul
  • \n
\n### Iteratoren\nDie Methode iter() versucht für ein beliebiges Objekt einen Iterator zurückzugeben. Der Iterator gibt bei jedem Aufruf ein Objekt der Liste zurück und setzt den Pointer der Liste um eines höher. Objekte sind iterierbar (iterable) wenn sie die Methode iter() unterstützen, z.B. Listen, Dictionaries, Dateihandles usw.\nEnd of explanation\nfor i in a:\n print(str(i))\nExplanation: Python erwartet in bestimmten Kontexten ein iterierbares Objekt, z.B. in der for-Schleife:\nEnd of explanation\nfor i in iter(a):\n print(str(i))\nExplanation: Das ist äquivalent zu\nEnd of explanation\n#beispiel\na = [1, 2, 3,]\nmy_iterator = iter(a)\nlist(my_iterator)\nmy_iterator = iter(a)\ntuple(my_iterator)\nExplanation: Man kann sich die vollständige Ausgabe eines Iterators ausgeben lassen, wenn man ihn als Parameter der list()- oder tuple() Funktion übergibt.\nEnd of explanation\n#eine traditionelle for-Schleife:\nsquared = []\nfor x in range(10):\n squared.append(x**2)\nsquared\nExplanation: Frage: Warum habe ich im letzten Beispiel den Iterator neu erzeugt? Kann man das weglassen?\n

List Comprehension

\n

List Comprehension sind ein Element (von vielen) des funktionalen Programmierens in Python. Der wichtigste Vorteil ist das Vermeiden von Nebeneffekten. Was heißt das? Anstelle des Verändern des Zustands einer Datenstruktur (z.B. eines Objekts), sind funktionale Ausdrücke wie mathematische Funktionen aufgebaut, die nur aus einem klaren Input und einen ebenso eindeutig definierten Output bestehen.

\n

Prinzipielle Schreibweise:
\n[&lt;expression> for &lt;variable> in &lt;iterable> &lt;&lt;if &lt;condition> >>]\n

Im folgenden Beispiel ist es das Ziel, die Zahlen von 0 bis 9 ins Quadrat zu setzen. Zuerst die traditionelle Lösung mit einer for-Schleife, in deren Körper eine neue Datenstruktur aufgebaut wird.

\nEnd of explanation\n[x**2 for x in range(10)]\n#a + bx\n#2 + 0.5x\n#x = 5 bis x = 10\n[x*0.5 + 2 for x in range(5, 11)]\nExplanation: Und hier die Version mit List Comprehension:\nEnd of explanation\nsquared = [x**2 for x in range(10)]\nsquared\nExplanation: Natürlich kann man den Rückgabewert von List Comprehensions auch in einer Variablen abspeichern.\nEnd of explanation\n#Aufgabe: vergleiche zwei Zahlenlisten und gebe alle Zahlenkombinationen aus, die ungleich sind\n#Erst einmal die traditionelle Lösung mit geschachtelten Schleifen:\ncombs = [] \nfor x in [1,2,3 ]: \n for y in [3,1,4]: \n if x != y: \n combs.append((x, y))\ncombs\nExplanation: Geschachtelte Schleifen\nMan kann in list comprehensions auch mehrere geschachtelte for-Schleifen aufrufen:\nEnd of explanation\n[(x,y) for x in [1,2,3] for y in [3,1,4] if x != y]\nExplanation: Und nun als List Comprehension:\nEnd of explanation\na = [\"ein Haus\", \"eine Tasse\", \"ein Kind\"]\nlist(map(len, a))\nExplanation:

Aufgabe 1

\n

Ersetzen Sie eine Reihe von Worten durch eine Reihe von Zahlen, die die Anzahl der Vokale anzeigen. Z.B.: \"Dies ist ein Satz\" -> \"2 1 2 1\".

\nDie Funktionen map(), filter()\nmap()\nmap(FunktionX, Liste)
\nDie Funktion FunktionX wird auf jedes Element der Liste angewandt. Ausgabe ist ein Iterator über eine neue Liste mit den Ergebnissen\nEnd of explanation\nfor i in a:\n print(len(i))\nExplanation: prozedurale Schreibweise:\nEnd of explanation\n#returns True if x is an even number\ndef is_even(x): \n return (x % 2) == 0 \nb = [2,3,4,5,6]\nlist(filter(is_even, b))\nExplanation: Aufgabe 2\nVerwenden Sie map() um in einer Liste von Worten jedes Wort in Großbuchstaben auszugeben. Diskutieren Sie evtl. Probleme mit einem Nachbarn. \nAufgabe 3 (optional)\nLösen Sie Aufgabe 1 mit map()\nfilter()\nfilter(FunktionX, Liste)
\nDie Funktion FunktionX wird auf jedes Element der Liste angewandt. Konstruiert einen neuen Iterator, in den die Elemente der Liste aufgenommen werden, für die die FunktionX den Ausgabewert True hat. \n
Bsp.:\nEnd of explanation\nimport itertools\n#don't try this at home:\n#list(itertools.cycle([1,2,3,4,5]))\nExplanation: Aufgabe 4\nVerwenden Sie filter, um aus dem folgenden Text eine Wortliste zu erstellen, in der alle Pronomina, Artikel und die Worte \"dass\", \"ist\", \"nicht\", \"auch\", \"und\" nicht enthalten sind:
\n\"Ich denke auch, dass ist nicht schlimm. Er hat es nicht gemerkt und das ist gut. Und überhaupt: es ist auch seine Schuld. Ehrlich, das ist wahr.\"\nDas itertools-Modul\nDie Funktionen des itertools-Moduls lassen sich einteilen in Funktionen, die: \n
    \n
  • die einen neuen Iterator auf der Basis eines existierenden Iterators erzeugen.
  • \n
  • die Teile der Ausgabe eines Iterators auswählen.
  • \n
  • die die Ausgabe eines Iterators gruppieren.
  • \n
  • die Iteratoren kombinieren
  • \n
\nNeuen Iterator erzeugen\nDiese Funktionen erzeugen einen neuen Iterator auf der Basis eines existierenden:
\nitertools.count(),itertools.cycle(), itertools.repeat(), itertools.chain(), itertools.isslice(), itertools.tee() \nitertools.cycle(iterator) Gibt die Liste der Elemente in iterator in einer unendlichen Schleife zurück\nEnd of explanation\nimport itertools\nlist(itertools.repeat([1,2,3,4], 3))\nExplanation: itertools.repeat(iterator, [n]) wiederholt die Elemente in iterator n mal.\nEnd of explanation\na = [1, 2, 3]\nb = [4, 5, 6]\nc = [7, 8, 9]\nlist(itertools.chain(a, b, c))\nExplanation: itertools.chain(iterator_1, iterator_2, ...) Erzeugt einen neuen Iterator, in dem die Elemente von iterator_1, _2 usw. aneinander gehängt sind.\nEnd of explanation\ntuple(itertools.combinations([1, 2, 3, 4], 2))\nExplanation: Aufgabe 5\nVerknüpfen Sie den Inhalt dreier Dateien zu einem Iterator\nTeile der Ausgabe eines Iterators auswählen.\nitertools.filterfalse(Prädikat, iterator) ist das Gegenstück zu filter(). Ausgabe enthält alle Elemente, für die das Prädikat falsch ist.\nitertools.takewhile(Prädikat, iterator) - gibt solange Elemente aus, wie das Prädikat wahr ist\nitertools.dropwhile(Prädikat, iter)entfernt alle Elemente, solange das Prädikat wahr ist. Gibt dann den Rest aus.\nitertools.compress(Daten, Selektoren) Nimmt zweei Iteratoren un dgibt nur die Elemente des ersten (Daten) zurück, für die das entsprechende Element im zweiten (Selektoren) wahr ist. Stoppt, wenn einer der Iteratoren erschöpft ist.\nIteratoren kombinieren\nitertools.combinations(Iterator, r) gibt alle r-Tuple Kombinationen der Elemente des Iterators wieder. Beispiel:\nEnd of explanation\ntuple(itertools.permutations([1, 2, 3, 4], 2)) \nExplanation: itertools.permutations(iterator, r) gibt alle Permutationen aller Elemente unabhängig von der Reihenfolge in Iterator wieder:\nEnd of explanation\na = [2, -3, 8, 12, -22, -1]\nlist(map(abs, a))\nExplanation: Aufgabe 7\nWieviele Zweier-Permutationen sind mit den graden Zahlen zwischen 1 und 101 möglich? \nThe operator module\nMathematische Operationen: add(), sub(), mul(), floordiv(), abs(), ...
\nLogische Operationen: not_(), truth()
\nBit Operationen: and_(), or_(), invert()
\nVergleiche: eq(), ne(), lt(), le(), gt(), and ge()
\nObjektidentität: is_(), is_not()
\nEnd of explanation\ndef calc(n):\n return (n * 13) / 100\na = [1, 2, 5, 7]\nlist(map(calc, a))\nExplanation: Lambda-Funktionen\nlambda erlaubt es, kleine Funktionen anonym zu definieren. Nehmen wir an, wir wollen in einer List von Zahlen alle Zahlen durch 100 teilen und mit 13 multiplizieren. Dann könnten wir das so machen:\nEnd of explanation\nlist(map(lambda x: (x * 13)/100, a))\nExplanation: Diese Funktion können wir mit Lambda nun direkt einsetzen:\nEnd of explanation\n#zählt die Vokale eines strings\ndef cv(word):\n return sum([1 for a in word if a in \"aeiouAEIOUÄÖÜäöü\"])\na = \"Dies ist eine Lüge, oder nicht?\"\n[cv(w) for w in a.split()]\nExplanation: Allerdings gibt es sehr unterschiedliche Meinungen darüber, ob auf diese Weise guter Code entsteht. Ich finde diesen Ratschlag anz gut: \n
    \n
  • Write a lambda function.
  • \n
  • Write a comment explaining what the heck that lambda does.
  • \n
  • Study the comment for a while, and think of a name that captures the essence of the comment.
  • \n
  • Convert the lambda to a def statement, using that name.
  • \n
  • Remove the comment.
  • \n
\nHausaufgabe\n1) Geben Sie alle Unicode-Zeichen zwischen 34 und 250 aus und geben Sie alle aus, die keine Buchstaben oder Zahlen sind\n2) Wie könnte man alle Dateien mit der Endung *.txt in einem Unterverzeichnis hintereinander ausgeben? \n3) Schauen Sie sich in der Python-Dokumentation die Funktionen sort und itemgetter an. Wie kann man diese so kombinieren, dass man damit ein Dictionary nach dem value sortieren kann. (no stackoverflow :-)\n







\nLösungen\nAufgabe 1\nEnd of explanation\n#uppeditys the string word \ndef upper(word):\n return word.upper()\na = [\"dies\", \"ist\", \"Ein\", \"satz\"]\nlist(map(upper, a))\nExplanation:
\n




\nAufgabe 2\nEnd of explanation\ndef cv(word):\n return sum([1 for a in word if a in \"aeiouAEIOUÄÖÜäöü\"])\na = \"Dies ist eine Lüge, oder nicht?\"\nlist(map(cv, a.split()))\nExplanation:







\nAufgabe 3\nEnd of explanation\nimport re\n#returns True if word is a function word\ndef is_no_function_word(word):\n f_words = [\"der\", \"die\", \"das\", \"ich\", \"du\", \"er\", \"sie\", \"es\", \"wir\", \"ihr\", \"dass\", \"ist\", \"hat\", \"auch\", \"und\", \"nicht\"]\n if word.lower() in f_words:\n return False\n else: \n return True\n \n \ntext = Ich denke auch, dass ist nicht schlimm. Er hat es nicht gemerkt und das ist gut. \n Und überhaupt: es ist auch seine Schuld. Ehrlich, das ist wahr.\nlist(filter(is_no_function_word, re.findall(\"\\w+\", text)))\nExplanation:







\nAufgabe 4\nEnd of explanation"}}},{"rowIdx":2131,"cells":{"Unnamed: 0":{"kind":"number","value":2131,"string":"2,131"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n 机器学习纳米学位\n监督学习\n项目2\nStep1: 准备数据\n在数据能够被作为输入提供给机器学习算法之前,它经常需要被清洗,格式化,和重新组织 - 这通常被叫做预处理。幸运的是,对于这个数据集,没有我们必须处理的无效或丢失的条目,然而,由于某一些特征存在的特性我们必须进行一定的调整。这个预处理都可以极大地帮助我们提升几乎所有的学习算法的结果和预测能力。\n获得特征和标签\nincome 列是我们需要的标签,记录一个人的年收入是否高于50K。 因此我们应该把他从数据中剥离出来,单独存放。\nStep2: 转换倾斜的连续特征\n一个数据集有时可能包含至少一个靠近某个数字的特征,但有时也会有一些相对来说存在极大值或者极小值的不平凡分布的的特征。算法对这种分布的数据会十分敏感,并且如果这种数据没有能够很好地规一化处理会使得算法表现不佳。在人口普查数据集的两个特征符合这个描述:'capital-gain'和'capital-loss'。\n运行下面的代码单元以创建一个关于这两个特征的条形图。请注意当前的值的范围和它们是如何分布的。\nStep3: 对于高度倾斜分布的特征如'capital-gain'和'capital-loss',常见的做法是对数据施加一个\n这五个特征和你在问题 6中讨论的特征比较怎么样?
\n如果说你的答案和这里的相近,那么这个可视化怎样佐证了你的想法?
\n如果你的选择不相近,那么为什么你觉得这些特征更加相关?\n回答:\n特征选择\n如果我们只是用可用特征的一个子集的话模型表现会怎么样?通过使用更少的特征来训练,在评价指标的角度来看我们的期望是训练和预测的时间会更少。从上面的可视化来看,我们可以看到前五个最重要的特征贡献了数据中所有特征中超过一半的重要性。这提示我们可以尝试去减小特征空间,简化模型需要学习的信息。下面代码单元将使用你前面发现的优化模型,并只使用五个最重要的特征在相同的训练集上训练模型。\nStep13: 问题 8 - 特征选择的影响\n最终模型在只是用五个特征的数据上和使用所有的特征数据上的 F-score 和准确率相比怎么样?\n如果训练时间是一个要考虑的因素,你会考虑使用部分特征的数据作为你的训练集吗?\n回答:\n问题 9 - 在测试集上测试你的模型\n终于到了测试的时候,记住,测试集只能用一次。\n使用你最有信心的模型,在测试集上测试,计算出准确率和 F-score。\n简述你选择这个模型的原因,并分析测试结果"},"code_prompt":{"kind":"string","value":"Python Code:\n# TODO:总的记录数\nn_records = len(data)\n# # TODO:被调查者 的收入大于$50,000的人数\nn_greater_50k = len(data[data.income.str.contains('>50K')])\n# # TODO:被调查者的收入最多为$50,000的人数\nn_at_most_50k = len(data[data.income.str.contains('<=50K')])\n# # TODO:被调查者收入大于$50,000所占的比例\ngreater_percent = (n_greater_50k / n_records) * 100\n# 打印结果\nprint (\"Total number of records: {}\".format(n_records))\nprint (\"Individuals making more than $50,000: {}\".format(n_greater_50k))\nprint (\"Individuals making at most $50,000: {}\".format(n_at_most_50k))\nprint (\"Percentage of individuals making more than $50,000: {:.2f}%\".format(greater_percent))\nExplanation: 机器学习纳米学位\n监督学习\n项目2: 为CharityML寻找捐献者\n欢迎来到机器学习工程师纳米学位的第二个项目!在此文件中,有些示例代码已经提供给你,但你还需要实现更多的功能让项目成功运行。除非有明确要求,你无须修改任何已给出的代码。以'练习'开始的标题表示接下来的代码部分中有你必须要实现的功能。每一部分都会有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示!\n除了实现代码外,你还必须回答一些与项目和你的实现有关的问题。每一个需要你回答的问题都会以'问题 X'为标题。请仔细阅读每个问题,并且在问题后的'回答'文字框中写出完整的答案。我们将根据你对问题的回答和撰写代码所实现的功能来对你提交的项目进行评分。\n提示:Code 和 Markdown 区域可通过Shift + Enter快捷键运行。此外,Markdown可以通过双击进入编辑模式。\n开始\n在这个项目中,你将使用1994年美国人口普查收集的数据,选用几个监督学习算法以准确地建模被调查者的收入。然后,你将根据初步结果从中选择出最佳的候选算法,并进一步优化该算法以最好地建模这些数据。你的目标是建立一个能够准确地预测被调查者年收入是否超过50000美元的模型。这种类型的任务会出现在那些依赖于捐款而存在的非营利性组织。了解人群的收入情况可以帮助一个非营利性的机构更好地了解他们要多大的捐赠,或是否他们应该接触这些人。虽然我们很难直接从公开的资源中推断出一个人的一般收入阶层,但是我们可以(也正是我们将要做的)从其他的一些公开的可获得的资源中获得一些特征从而推断出该值。\n这个项目的数据集来自UCI机器学习知识库。这个数据集是由Ron Kohavi和Barry Becker在发表文章_\"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid\"_之后捐赠的,你可以在Ron Kohavi提供的在线版本中找到这个文章。我们在这里探索的数据集相比于原有的数据集有一些小小的改变,比如说移除了特征'fnlwgt' 以及一些遗失的或者是格式不正确的记录。\n探索数据\n运行下面的代码单元以载入需要的Python库并导入人口普查数据。注意数据集的最后一列'income'将是我们需要预测的列(表示被调查者的年收入会大于或者是最多50,000美元),人口普查数据中的每一列都将是关于被调查者的特征。\n练习:数据探索\n首先我们对数据集进行一个粗略的探索,我们将看看每一个类别里会有多少被调查者?并且告诉我们这些里面多大比例是年收入大于50,000美元的。在下面的代码单元中,你将需要计算以下量:\n总的记录数量,'n_records'\n年收入大于50,000美元的人数,'n_greater_50k'.\n年收入最多为50,000美元的人数 'n_at_most_50k'.\n年收入大于50,000美元的人所占的比例, 'greater_percent'.\n提示: 您可能需要查看上面的生成的表,以了解'income'条目的格式是什么样的。\nEnd of explanation\n# 为这个项目导入需要的库\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom IPython.display import display # 允许为DataFrame使用display()\n# 导入附加的可视化代码visuals.py\nimport visuals as vs\n# 为notebook提供更加漂亮的可视化\n%matplotlib inline\n# 导入人口普查数据\ndata = pd.read_csv(\"census.csv\")\n# 成功 - 显示第一条记录\ndisplay(data.head(n=1))\n# 将数据切分成特征和对应的标签\nincome_raw = data['income']\nfeatures_raw = data.drop('income', axis = 1)\nExplanation: 准备数据\n在数据能够被作为输入提供给机器学习算法之前,它经常需要被清洗,格式化,和重新组织 - 这通常被叫做预处理。幸运的是,对于这个数据集,没有我们必须处理的无效或丢失的条目,然而,由于某一些特征存在的特性我们必须进行一定的调整。这个预处理都可以极大地帮助我们提升几乎所有的学习算法的结果和预测能力。\n获得特征和标签\nincome 列是我们需要的标签,记录一个人的年收入是否高于50K。 因此我们应该把他从数据中剥离出来,单独存放。\nEnd of explanation\n# 可视化 'capital-gain'和'capital-loss' 两个特征\nvs.distribution(features_raw)\nExplanation: 转换倾斜的连续特征\n一个数据集有时可能包含至少一个靠近某个数字的特征,但有时也会有一些相对来说存在极大值或者极小值的不平凡分布的的特征。算法对这种分布的数据会十分敏感,并且如果这种数据没有能够很好地规一化处理会使得算法表现不佳。在人口普查数据集的两个特征符合这个描述:'capital-gain'和'capital-loss'。\n运行下面的代码单元以创建一个关于这两个特征的条形图。请注意当前的值的范围和它们是如何分布的。\nEnd of explanation\n# 对于倾斜的数据使用Log转换\nskewed = ['capital-gain', 'capital-loss']\nfeatures_raw[skewed] = data[skewed].apply(lambda x: np.log(x + 1))\n# 可视化对数转换后 'capital-gain'和'capital-loss' 两个特征\nvs.distribution(features_raw, transformed = True)\nExplanation: 对于高度倾斜分布的特征如'capital-gain'和'capital-loss',常见的做法是对数据施加一个
对数转换,将数据转换成对数,这样非常大和非常小的值不会对学习算法产生负面的影响。并且使用对数变换显著降低了由于异常值所造成的数据范围异常。但是在应用这个变换时必须小心:因为0的对数是没有定义的,所以我们必须先将数据处理成一个比0稍微大一点的数以成功完成对数转换。\n运行下面的代码单元来执行数据的转换和可视化结果。再次,注意值的范围和它们是如何分布的。\nEnd of explanation\nfrom sklearn.preprocessing import MinMaxScaler\n# 初始化一个 scaler,并将它施加到特征上\nscaler = MinMaxScaler()\nnumerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']\nfeatures_raw[numerical] = scaler.fit_transform(data[numerical])\n# 显示一个经过缩放的样例记录\ndisplay(features_raw.head(n = 1))\nExplanation: 规一化数字特征\n除了对于高度倾斜的特征施加转换,对数值特征施加一些形式的缩放通常会是一个好的习惯。在数据上面施加一个缩放并不会改变数据分布的形式(比如上面说的'capital-gain' or 'capital-loss');但是,规一化保证了每一个特征在使用监督学习器的时候能够被平等的对待。注意一旦使用了缩放,观察数据的原始形式不再具有它本来的意义了,就像下面的例子展示的。\n运行下面的代码单元来规一化每一个数字特征。我们将使用sklearn.preprocessing.MinMaxScaler来完成这个任务。\nEnd of explanation\n# TODO:使用pandas.get_dummies()对'features_raw'数据进行独热编码\nfeatures = pd.get_dummies(features_raw)\n# TODO:将'income_raw'编码成数字值\nincome = income_raw.replace(['>50K', '<=50K'], [1, 0])\n# 打印经过独热编码之后的特征数量\nencoded = list(features.columns)\nprint (\"{} total features after one-hot encoding.\".format(len(encoded)))\n# 移除下面一行的注释以观察编码的特征名字\n#print encoded\nExplanation: 练习:数据预处理\n从上面的数据探索中的表中,我们可以看到有几个属性的每一条记录都是非数字的。通常情况下,学习算法期望输入是数字的,这要求非数字的特征(称为类别变量)被转换。转换类别变量的一种流行的方法是使用独热编码方案。独热编码为每一个非数字特征的每一个可能的类别创建一个_“虚拟”_变量。例如,假设someFeature有三个可能的取值A,B或者C,。我们将把这个特征编码成someFeature_A, someFeature_B和someFeature_C.\n| 特征X | | 特征X_A | 特征X_B | 特征X_C |\n| :-: | | :-: | :-: | :-: |\n| B | | 0 | 1 | 0 |\n| C | ----> 独热编码 ----> | 0 | 0 | 1 |\n| A | | 1 | 0 | 0 |\n此外,对于非数字的特征,我们需要将非数字的标签'income'转换成数值以保证学习算法能够正常工作。因为这个标签只有两种可能的类别(\"<=50K\"和\">50K\"),我们不必要使用独热编码,可以直接将他们编码分别成两个类0和1,在下面的代码单元中你将实现以下功能:\n - 使用pandas.get_dummies()对'features_raw'数据来施加一个独热编码。\n - 将目标标签'income_raw'转换成数字项。\n - 将\"<=50K\"转换成0;将\">50K\"转换成1。\nEnd of explanation\n# 导入 train_test_split\nfrom sklearn.model_selection import train_test_split\n# 将'features'和'income'数据切分成训练集和测试集\nX_train, X_test, y_train, y_test = train_test_split(features, income, test_size = 0.2, random_state = 0,\n stratify = income)\n# 将'X_train'和'y_train'进一步切分为训练集和验证集\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0,\n stratify = y_train)\n# 显示切分的结果\nprint (\"Training set has {} samples.\".format(X_train.shape[0]))\nprint (\"Validation set has {} samples.\".format(X_val.shape[0]))\nprint (\"Testing set has {} samples.\".format(X_test.shape[0]))\nExplanation: 混洗和切分数据\n现在所有的 类别变量 已被转换成数值特征,而且所有的数值特征已被规一化。和我们一般情况下做的一样,我们现在将数据(包括特征和它们的标签)切分成训练和测试集。其中80%的数据将用于训练和20%的数据用于测试。然后再进一步把训练数据分为训练集和验证集,用来选择和优化模型。\n运行下面的代码单元来完成切分。\nEnd of explanation\n#不能使用scikit-learn,你需要根据公式自己实现相关计算。\n#TODO: 计算准确率\naccuracy = np.divide(n_greater_50k, float(n_records))\n# TODO: 计算查准率 Precision\nprecision = np.divide(n_greater_50k, float(n_records))\n# TODO: 计算查全率 Recall\nrecall = np.divide(n_greater_50k, n_greater_50k)\n# TODO: 使用上面的公式,设置beta=0.5,计算F-score\nfscore = (1 + np.power(0.5, 2)) * np.multiply(precision, recall) / (np.power(0.5, 2) * precision + recall)\n# 打印结果\nprint (\"Naive Predictor on validation data: \\n \\\n Accuracy score: {:.4f} \\n \\\n Precision: {:.4f} \\n \\\n Recall: {:.4f} \\n \\\n F-score: {:.4f}\".format(accuracy, precision, recall, fscore))\nExplanation: 评价模型性能\n在这一部分中,我们将尝试四种不同的算法,并确定哪一个能够最好地建模数据。四种算法包含一个天真的预测器 和三个你选择的监督学习器。\n评价方法和朴素的预测器\nCharityML通过他们的研究人员知道被调查者的年收入大于\\$50,000最有可能向他们捐款。因为这个原因CharityML对于准确预测谁能够获得\\$50,000以上收入尤其有兴趣。这样看起来使用准确率作为评价模型的标准是合适的。另外,把没有收入大于\\$50,000的人识别成年收入大于\\$50,000对于CharityML来说是有害的,因为他想要找到的是有意愿捐款的用户。这样,我们期望的模型具有准确预测那些能够年收入大于\\$50,000的能力比模型去查全这些被调查者更重要。我们能够使用F-beta score作为评价指标,这样能够同时考虑查准率和查全率:\n$$ F_{\\beta} = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{\\left( \\beta^2 \\cdot precision \\right) + recall} $$\n尤其是,当 $\\beta = 0.5$ 的时候更多的强调查准率,这叫做F$_{0.5}$ score (或者为了简单叫做F-score)。\nEnd of explanation\n# TODO:从sklearn中导入两个评价指标 - fbeta_score和accuracy_score\nfrom sklearn.metrics import fbeta_score, accuracy_score\ndef train_predict(learner, sample_size, X_train, y_train, X_val, y_val): \n '''\n inputs:\n - learner: the learning algorithm to be trained and predicted on\n - sample_size: the size of samples (number) to be drawn from training set\n - X_train: features training set\n - y_train: income training set\n - X_val: features validation set\n - y_val: income validation set\n '''\n \n results = {}\n \n # TODO:使用sample_size大小的训练数据来拟合学习器\n # TODO: Fit the learner to the training data using slicing with 'sample_size'\n start = time() # 获得程序开始时间\n learner = learner.fit(X_train[:sample_size],y_train[:sample_size])\n end = time() # 获得程序结束时间\n \n # TODO:计算训练时间\n results['train_time'] = end - start\n print(results['train_time'])\n \n # TODO: 得到在验证集上的预测值\n # 然后得到对前300个训练数据的预测结果\n start = time() # 获得程序开始时间\n predictions_val = learner.predict(X_val)\n predictions_train = learner.predict(X_train[:300])\n end = time() # 获得程序结束时间\n \n # TODO:计算预测用时\n results['pred_time'] = end - start\n \n # TODO:计算在最前面的300个训练数据的准确率\n results['acc_train'] = accuracy_score(y_train[:300],predictions_train)\n \n # TODO:计算在验证上的准确率\n results['acc_test'] = accuracy_score( y_val, predictions_val)\n \n # TODO:计算在最前面300个训练数据上的F-score\n results['f_train'] = fbeta_score(y_train[:300], predictions_train, 0.5)\n \n # TODO:计算验证集上的F-score\n results['f_test'] = fbeta_score(y_val,predictions_val,0.5)\n \n # 成功\n print (\"{} trained on {} samples.\".format(learner.__class__.__name__, sample_size))\n \n # 返回结果\n return results\nExplanation: 问题 1 - 天真的预测器的性能\n通过查看收入超过和不超过 \\$50,000 的人数,我们能发现多数被调查者年收入没有超过 \\$50,000。如果我们简单地预测说“这个人的收入没有超过 \\$50,000”,我们就可以得到一个 准确率超过 50% 的预测。这样我们甚至不用看数据就能做到一个准确率超过 50%。这样一个预测被称作是天真的。通常对数据使用一个天真的预测器是十分重要的,这样能够帮助建立一个模型表现是否好的基准。 使用下面的代码单元计算天真的预测器的相关性能。将你的计算结果赋值给'accuracy', ‘precision’, ‘recall’ 和 'fscore',这些值会在后面被使用,请注意这里不能使用scikit-learn,你需要根据公式自己实现相关计算。\n如果我们选择一个无论什么情况都预测被调查者年收入大于 \\$50,000 的模型,那么这个模型在验证集上的准确率,查准率,查全率和 F-score是多少? \n监督学习模型\n问题 2 - 模型应用\n你能够在 scikit-learn 中选择以下监督学习模型\n- 高斯朴素贝叶斯 (GaussianNB)\n- 决策树 (DecisionTree)\n- 集成方法 (Bagging, AdaBoost, Random Forest, Gradient Boosting)\n- K近邻 (K Nearest Neighbors)\n- 随机梯度下降分类器 (SGDC)\n- 支撑向量机 (SVM)\n- Logistic回归(LogisticRegression)\n从上面的监督学习模型中选择三个适合我们这个问题的模型,并回答相应问题。\n模型1\n模型名称\n回答:决策树\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:学生录取资格(来自机器学习课程(决策树))\n这个模型的优势是什么?他什么情况下表现最好?\n回答:优势:1、决策树易于实现和理解;2、计算复杂度相对较低,结果的输出易于理解。\n当目标函数具有离散的输出值值表现最好\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:可能出现过拟合问题。当过于依赖数据或参数设置不好时,它的表现很差。\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:1、该问题是非线性问题,决策树能够很好地解决非线性问题;2、我们的数据中有大量布尔型特征且它的一些特征对于我们的目标可能相关程度并不高\n模型2\n模型名称\n回答:高斯朴素贝叶斯\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:过滤垃圾邮件,可以把文档中的词作为特征进行分类(来自机器学习课程(朴素贝叶斯)))。\n这个模型的优势是什么?他什么情况下表现最好?\n回答:优势是在数据较少的情况下仍然有效,对缺失数据不敏感。适合小规模数据\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:朴素贝叶斯模型假设各属性相互独立。但在实际应用中,属性之间往往有一定关联性,导致分类效果受到影响。\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:数据集各属性关联性相对较小,且为小规模数据\n模型3\n模型名称\n回答:AdaBoost\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:预测患有疝病的马是否存活\n这个模型的优势是什么?他什么情况下表现最好?\n回答:优势是泛化错误低,易编码,可以应用在大部分分类器上,无参数调整。对于基于错误提升分类器性能它的表现最好\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:缺点是对离群点敏感。当输入数据有不少极端值时,它的表现很差\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:我们的数据集特征很多,较为复杂,在后续迭代中,出现错误的数据权重可能增大,而针对这种错误的调节能力正是AdaBoost的长处\n练习 - 创建一个训练和预测的流水线\n为了正确评估你选择的每一个模型的性能,创建一个能够帮助你快速有效地使用不同大小的训练集并在验证集上做预测的训练和验证的流水线是十分重要的。\n你在这里实现的功能将会在接下来的部分中被用到。在下面的代码单元中,你将实现以下功能:\n从sklearn.metrics中导入fbeta_score和accuracy_score。\n用训练集拟合学习器,并记录训练时间。\n对训练集的前300个数据点和验证集进行预测并记录预测时间。\n计算预测训练集的前300个数据点的准确率和F-score。\n计算预测验证集的准确率和F-score。\nEnd of explanation\n# TODO:从sklearn中导入三个监督学习模型\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import AdaBoostClassifier\n# TODO:初始化三个模型\nclf_A = DecisionTreeClassifier()\nclf_B = GaussianNB()\nclf_C = AdaBoostClassifier()\n# TODO:计算1%, 10%, 100%的训练数据分别对应多少点\nsamples_1 = int(len(X_train)*0.01)\nsamples_10 = int(len(X_train)*0.1)\nsamples_100 = int(len(X_train))\n# 收集学习器的结果\nresults = {}\nfor clf in [clf_A, clf_B, clf_C]:\n clf_name = clf.__class__.__name__\n results[clf_name] = {}\n for i, samples in enumerate([samples_1, samples_10, samples_100]):\n results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_val, y_val)\n# 对选择的三个模型得到的评价结果进行可视化\nvs.evaluate(results, accuracy, fscore)\nExplanation: 练习:初始模型的评估\n在下面的代码单元中,您将需要实现以下功能: \n- 导入你在前面讨论的三个监督学习模型。 \n- 初始化三个模型并存储在'clf_A','clf_B'和'clf_C'中。\n - 使用模型的默认参数值,在接下来的部分中你将需要对某一个模型的参数进行调整。 \n - 设置random_state (如果有这个参数)。 \n- 计算1%, 10%, 100%的训练数据分别对应多少个数据点,并将这些值存储在'samples_1', 'samples_10', 'samples_100'中\n注意:取决于你选择的算法,下面实现的代码可能需要一些时间来运行!\nEnd of explanation\n# TODO:导入'GridSearchCV', 'make_scorer'和其他一些需要的库\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import fbeta_score,make_scorer\n# TODO:初始化分类器\nclf = AdaBoostClassifier(random_state=0)\n# TODO:创建你希望调节的参数列表\nparameters = {'n_estimators': [50, 100, 200]}\n# TODO:创建一个fbeta_score打分对象\nscorer = make_scorer(fbeta_score, beta=0.5)\n# TODO:在分类器上使用网格搜索,使用'scorer'作为评价函数\ngrid_obj = GridSearchCV(clf, parameters,scorer)\n# TODO:用训练数据拟合网格搜索对象并找到最佳参数\ngrid_obj = grid_obj.fit(X_train, y_train)\n# 得到estimator\nbest_clf = grid_obj.best_estimator_\n# 使用没有调优的模型做预测\npredictions = (clf.fit(X_train, y_train)).predict(X_val)\nbest_predictions = best_clf.predict(X_val)\n# 汇报调优后的模型\nprint (\"best_clf\\n------\")\nprint (best_clf)\n# 汇报调参前和调参后的分数\nprint (\"\\nUnoptimized model\\n------\")\nprint (\"Accuracy score on validation data: {:.4f}\".format(accuracy_score(y_val, predictions)))\nprint (\"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, predictions, beta = 0.5)))\nprint (\"\\nOptimized Model\\n------\")\nprint (\"Final accuracy score on the validation data: {:.4f}\".format(accuracy_score(y_val, best_predictions)))\nprint (\"Final F-score on the validation data: {:.4f}\".format(fbeta_score(y_val, best_predictions, beta = 0.5)))\nExplanation: 提高效果\n在这最后一节中,您将从三个有监督的学习模型中选择 最好的 模型来使用学生数据。你将在整个训练集(X_train和y_train)上使用网格搜索优化至少调节一个参数以获得一个比没有调节之前更好的 F-score。\n问题 3 - 选择最佳的模型\n基于你前面做的评价,用一到两段话向 CharityML 解释这三个模型中哪一个对于判断被调查者的年收入大于 \\$50,000 是最合适的。 \n提示:你的答案应该包括评价指标,预测/训练时间,以及该算法是否适合这里的数据。\n回答:DecisionTree在训练集上的accuracy score和F-score在三个模型中是最好的,虽然DecisionTree在测试集上的表现没这么好,在无参数调整的情况下出现了轻度的过拟合,但调整参数后应该可以消除这个问题,虽然对完整数据它的训练时间较长,但比AdaBoost快多了,且考虑到它的预测时间短,也就是查询时间短,我们一旦把模型训练出来,之后的主要任务就只有查询了,并不会过多消耗资源和开支,所以我还是决定使用DecisionTree.\n问题 4 - 用通俗的话解释模型\n用一到两段话,向 CharityML 用外行也听得懂的话来解释最终模型是如何工作的。你需要解释所选模型的主要特点。例如,这个模型是怎样被训练的,它又是如何做出预测的。避免使用高级的数学或技术术语,不要使用公式或特定的算法名词。\n回答: 根据训练集中输入的特征进行逐步分类,并形成相应的树状结构,输入预测值的特征,根据特征的值寻找树的响应节点,知道最后的节点,就是预测的结果\n练习:模型调优\n调节选择的模型的参数。使用网格搜索(GridSearchCV)来至少调整模型的重要参数(至少调整一个),这个参数至少需尝试3个不同的值。你要使用整个训练集来完成这个过程。在接下来的代码单元中,你需要实现以下功能:\n导入sklearn.model_selection.GridSearchCV 和 sklearn.metrics.make_scorer.\n初始化你选择的分类器,并将其存储在clf中。\n设置random_state (如果有这个参数)。\n创建一个对于这个模型你希望调整参数的字典。\n例如: parameters = {'parameter' : [list of values]}。\n注意: 如果你的学习器有 max_features 参数,请不要调节它!\n使用make_scorer来创建一个fbeta_score评分对象(设置$\\beta = 0.5$)。\n在分类器clf上用'scorer'作为评价函数运行网格搜索,并将结果存储在grid_obj中。\n用训练集(X_train, y_train)训练grid search object,并将结果存储在grid_fit中。\n注意: 取决于你选择的参数列表,下面实现的代码可能需要花一些时间运行!\nEnd of explanation\n# TODO:导入一个有'feature_importances_'的监督学习模型\n# TODO:在训练集上训练一个监督学习模型\nmodel = None\n# TODO: 提取特征重要性\nimportances = None\n# 绘图\nvs.feature_plot(importances, X_train, y_train)\nExplanation: 问题 5 - 最终模型评估\n你的最优模型在测试数据上的准确率和 F-score 是多少?这些分数比没有优化的模型好还是差?\n注意:请在下面的表格中填写你的结果,然后在答案框中提供讨论。\n结果:\n| 评价指标 | 未优化的模型 | 优化的模型 |\n| :------------: | :---------------: | :-------------: | \n| 准确率 | | |\n| F-score | | |\n回答:\n特征的重要性\n在数据上(比如我们这里使用的人口普查的数据)使用监督学习算法的一个重要的任务是决定哪些特征能够提供最强的预测能力。专注于少量的有效特征和标签之间的关系,我们能够更加简单地理解这些现象,这在很多情况下都是十分有用的。在这个项目的情境下这表示我们希望选择一小部分特征,这些特征能够在预测被调查者是否年收入大于\\$50,000这个问题上有很强的预测能力。\n选择一个有 'feature_importance_' 属性的scikit学习分类器(例如 AdaBoost,随机森林)。'feature_importance_' 属性是对特征的重要性排序的函数。在下一个代码单元中用这个分类器拟合训练集数据并使用这个属性来决定人口普查数据中最重要的5个特征。\n问题 6 - 观察特征相关性\n当探索数据的时候,它显示在这个人口普查数据集中每一条记录我们有十三个可用的特征。 \n在这十三个记录中,你认为哪五个特征对于预测是最重要的,选择每个特征的理由是什么?你会怎样对他们排序?\n回答:\n- 特征1:\n- 特征2:\n- 特征3:\n- 特征4:\n- 特征5:\n练习 - 提取特征重要性\n选择一个scikit-learn中有feature_importance_属性的监督学习分类器,这个属性是一个在做预测的时候根据所选择的算法来对特征重要性进行排序的功能。\n在下面的代码单元中,你将要实现以下功能:\n - 如果这个模型和你前面使用的三个模型不一样的话从sklearn中导入一个监督学习模型。\n - 在整个训练集上训练一个监督学习模型。\n - 使用模型中的 'feature_importances_'提取特征的重要性。\nEnd of explanation\n# 导入克隆模型的功能\nfrom sklearn.base import clone\n# 减小特征空间\nX_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]\nX_val_reduced = X_val[X_val.columns.values[(np.argsort(importances)[::-1])[:5]]]\n# 在前面的网格搜索的基础上训练一个“最好的”模型\nclf_on_reduced = (clone(best_clf)).fit(X_train_reduced, y_train)\n# 做一个新的预测\nreduced_predictions = clf_on_reduced.predict(X_val_reduced)\n# 对于每一个版本的数据汇报最终模型的分数\nprint (\"Final Model trained on full data\\n------\")\nprint (\"Accuracy on validation data: {:.4f}\".format(accuracy_score(y_val, best_predictions)))\nprint (\"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, best_predictions, beta = 0.5)))\nprint (\"\\nFinal Model trained on reduced data\\n------\")\nprint (\"Accuracy on validation data: {:.4f}\".format(accuracy_score(y_val, reduced_predictions)))\nprint (\"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, reduced_predictions, beta = 0.5)))\nExplanation: 问题 7 - 提取特征重要性\n观察上面创建的展示五个用于预测被调查者年收入是否大于\\$50,000最相关的特征的可视化图像。\n这五个特征的权重加起来是否超过了0.5?
\n这五个特征和你在问题 6中讨论的特征比较怎么样?
\n如果说你的答案和这里的相近,那么这个可视化怎样佐证了你的想法?
\n如果你的选择不相近,那么为什么你觉得这些特征更加相关?\n回答:\n特征选择\n如果我们只是用可用特征的一个子集的话模型表现会怎么样?通过使用更少的特征来训练,在评价指标的角度来看我们的期望是训练和预测的时间会更少。从上面的可视化来看,我们可以看到前五个最重要的特征贡献了数据中所有特征中超过一半的重要性。这提示我们可以尝试去减小特征空间,简化模型需要学习的信息。下面代码单元将使用你前面发现的优化模型,并只使用五个最重要的特征在相同的训练集上训练模型。\nEnd of explanation\n#TODO test your model on testing data and report accuracy and F score\nExplanation: 问题 8 - 特征选择的影响\n最终模型在只是用五个特征的数据上和使用所有的特征数据上的 F-score 和准确率相比怎么样?\n如果训练时间是一个要考虑的因素,你会考虑使用部分特征的数据作为你的训练集吗?\n回答:\n问题 9 - 在测试集上测试你的模型\n终于到了测试的时候,记住,测试集只能用一次。\n使用你最有信心的模型,在测试集上测试,计算出准确率和 F-score。\n简述你选择这个模型的原因,并分析测试结果\nEnd of explanation"}}},{"rowIdx":2132,"cells":{"Unnamed: 0":{"kind":"number","value":2132,"string":"2,132"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n DefinedAEpTandZ0 media example\nStep1: Measurement of two CPWG lines with different lengths\nThe measurement where performed the 21th March 2017 on a Anritsu MS46524B 20GHz Vector Network Analyser. The setup is a linear frequency sweep from 1MHz to 10GHz with 10'000 points. Output power is 0dBm, IF bandwidth is 1kHz and neither averaging nor smoothing are used.\nCPWGxxx is a L long, W wide, with a G wide gap to top ground, T thick copper coplanar waveguide on ground on a H height substrate with top and bottom ground plane. A closely spaced via wall is placed on both side of the line and the top and bottom ground planes are connected by many vias.\n| Name | L (mm) | W (mm) | G (mm) | H (mm) | T (um) | Substrate |\n| \nStep2: Impedance from the line and from the connector section may be estimated on the step response.\nThe line section is not flat, there is some variation in the impedance which may be induced by manufacturing tolerances and dielectric inhomogeneity.\nNote that the delay on the reflexion plot are twice the effective section delays because the wave travel back and forth on the line.\nConnector discontinuity is about 50 ps long. TL100 line plateau (flat impedance part) is about 450 ps long.\nStep3: Dielectric effective relative permittivity extraction by multiline method\nStep4: Calibration results shows a very low residual noise floor. The error model is well fitted.\nStep5: Relative permittivity $\\epsilon_{e,eff}$ and attenuation $A$ shows a reasonable agreement.\nA better agreement could be achieved by implementing the Kirschning and Jansen microstripline dispersion model or using a linear correction.\nConnectors effects estimation\nStep6: Connector + thru plots shows a reasonable agreement between calibration results and model.\nFinal check"},"code_prompt":{"kind":"string","value":"Python Code:\n%load_ext autoreload\n%autoreload 2\nimport skrf as rf\nimport skrf.mathFunctions as mf\nimport numpy as np\nfrom numpy import real, log, log10, sum, absolute, pi, sqrt\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom scipy.optimize import minimize\nrf.stylely()\nExplanation: DefinedAEpTandZ0 media example\nEnd of explanation\n# Load raw measurements\nTL100 = rf.Network('CPWG100.s2p')\nTL200 = rf.Network('CPWG200.s2p')\nTL100_dc = TL100.extrapolate_to_dc(kind='linear')\nTL200_dc = TL200.extrapolate_to_dc(kind='linear')\nplt.figure()\nplt.suptitle('Raw measurement')\nTL100.plot_s_db()\nTL200.plot_s_db()\nplt.figure()\nt0 = -2\nt1 = 4\nplt.suptitle('Time domain reflexion step response (DC extrapolation)')\nax = plt.subplot(1, 1, 1)\nTL100_dc.s11.plot_z_time_step(pad=2000, window='hamming', z0=50, label='TL100', ax=ax, color='0.0')\nTL200_dc.s11.plot_z_time_step(pad=2000, window='hamming', z0=50, label='TL200', ax=ax, color='0.2')\nax.set_xlim(t0, t1)\nax.xaxis.set_minor_locator(AutoMinorLocator(10))\nax.yaxis.set_minor_locator(AutoMinorLocator(5))\nax.patch.set_facecolor('1.0')\nax.grid(True, color='0.8', which='minor')\nax.grid(True, color='0.4', which='major')\nplt.show()\nExplanation: Measurement of two CPWG lines with different lengths\nThe measurement where performed the 21th March 2017 on a Anritsu MS46524B 20GHz Vector Network Analyser. The setup is a linear frequency sweep from 1MHz to 10GHz with 10'000 points. Output power is 0dBm, IF bandwidth is 1kHz and neither averaging nor smoothing are used.\nCPWGxxx is a L long, W wide, with a G wide gap to top ground, T thick copper coplanar waveguide on ground on a H height substrate with top and bottom ground plane. A closely spaced via wall is placed on both side of the line and the top and bottom ground planes are connected by many vias.\n| Name | L (mm) | W (mm) | G (mm) | H (mm) | T (um) | Substrate |\n| :--- | ---: | ---: | ---: | ---: | ---: | :--- |\n| MSL100 | 100 | 1.70 | 0.50 | 1.55 | 50 | FR-4 |\n| MSL200 | 200 | 1.70 | 0.50 | 1.55 | 50 | FR-4 |\nThe milling of the artwork is performed mechanically with a lateral wall of 45°.\nThe relative permittivity of the dielectric was assumed to be approximately 4.5 for design purpose.\nEnd of explanation\nZ_conn = 53.2 # ohm, connector impedance\nZ_line = 51.4 # ohm, line plateau impedance\nd_conn = 0.05e-9 # s, connector discontinuity delay\nd_line = 0.45e-9 # s, line plateau delay, without connectors\nExplanation: Impedance from the line and from the connector section may be estimated on the step response.\nThe line section is not flat, there is some variation in the impedance which may be induced by manufacturing tolerances and dielectric inhomogeneity.\nNote that the delay on the reflexion plot are twice the effective section delays because the wave travel back and forth on the line.\nConnector discontinuity is about 50 ps long. TL100 line plateau (flat impedance part) is about 450 ps long.\nEnd of explanation\n#Make the missing reflect measurement\n#This is possible because we already have existing calibration\n#and know what the open measurement would look like at the reference plane\n#'refl_offset' needs to be set to -half_thru - connector_length.\nreflect = TL100.copy()\nreflect.s[:,0,0] = 1\nreflect.s[:,1,1] = 1\nreflect.s[:,1,0] = 0\nreflect.s[:,0,1] = 0\n# Perform NISTMultilineTRL algorithm. Reference plane is at the center of the thru.\ncal = rf.NISTMultilineTRL([TL100, reflect, TL200], [1], [0, 100e-3], er_est=3.0, refl_offset=[-56e-3])\nplt.figure()\nplt.title('Corrected lines')\ncal.apply_cal(TL100).plot_s_db()\ncal.apply_cal(TL200).plot_s_db()\nplt.show()\nExplanation: Dielectric effective relative permittivity extraction by multiline method\nEnd of explanation\nfrom skrf.media import DefinedAEpTandZ0\nfreq = TL100.frequency\nf = TL100.frequency.f\nf_ghz = TL100.frequency.f/1e9\nL = 0.1\nA = 0.0\nf_A = 1e9\nep_r0 = 2.0\ntanD0 = 0.001\nf_ep = 1e9\nx0 = [ep_r0, tanD0]\nep_r_mea = cal.er_eff.real\nA_mea = 20/log(10)*cal.gamma.real\ndef model(x, freq, ep_r_mea, A_mea, f_ep):\n ep_r, tanD = x[0], x[1]\n m = DefinedAEpTandZ0(frequency=freq, ep_r=ep_r, tanD=tanD, Z0=50,\n f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')\n ep_r_mod = m.ep_r_f.real\n A_mod = m.alpha * log(10)/20\n return sum((ep_r_mod - ep_r_mea)**2) + 0.001*sum((20/log(10)*A_mod - A_mea)**2)\nres = minimize(model, x0, args=(TL100.frequency, ep_r_mea, A_mea, f_ep),\n bounds=[(2, 4), (0.001, 0.013)])\nep_r, tanD = res.x[0], res.x[1]\nprint('epr={:.3f}, tand={:.4f} at {:.1f} GHz.'.format(ep_r, tanD, f_ep * 1e-9))\nm = DefinedAEpTandZ0(frequency=freq, ep_r=ep_r, tanD=tanD, Z0=50,\n f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')\nplt.figure()\nplt.suptitle('Effective relative permittivity and attenuation')\nplt.subplot(2,1,1)\nplt.ylabel('$\\epsilon_{r,eff}$')\nplt.plot(f_ghz, ep_r_mea, label='measured')\nplt.plot(f_ghz, m.ep_r_f.real, label='model')\nplt.legend()\nplt.subplot(2,1,2)\nplt.xlabel('Frequency [GHz]')\nplt.ylabel('A (dB/m)')\nplt.plot(f_ghz, A_mea, label='measured')\nplt.plot(f_ghz, 20/log(10)*m.alpha, label='model')\nplt.legend()\nplt.show()\nExplanation: Calibration results shows a very low residual noise floor. The error model is well fitted.\nEnd of explanation\n# note: a half line is embedded in connector network\ncoefs = cal.coefs\nr = mf.sqrt_phase_unwrap(coefs['forward reflection tracking'])\ns1 = np.array([[coefs['forward directivity'],r],\n [r, coefs['forward source match']]]).transpose()\nconn = TL100.copy()\nconn.name = 'Connector'\nconn.s = s1\n# delay estimation,\nphi_conn = (np.angle(conn.s[:500,1,0]))\nz = np.polyfit(f[:500], phi_conn, 1)\np = np.poly1d(z)\ndelay = -z[0]/(2*np.pi)\nprint('Connector + half thru delay: {:.0f} ps'.format(delay * 1e12))\nprint('TDR readed half thru delay: {:.0f} ps'.format(d_line/2 * 1e12))\nd_conn_p = delay - d_line/2\nprint('Connector delay: {:.0f} ps'.format(d_conn_p * 1e12))\n# connector model with guessed loss\nhalf = m.line(d_line/2, 's', z0=Z_line)\nmc = DefinedAEpTandZ0(m.frequency, ep_r=1, tanD=0.025, Z0=50,\n f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')\nleft = mc.line(d_conn_p, 's', z0=Z_conn)\nright = left.flipped()\ncheck = mc.thru() ** left ** half ** mc.thru()\nplt.figure()\nplt.suptitle('Connector + half thru comparison')\nplt.subplot(2,1,1)\nconn.plot_s_deg(1, 0, label='measured')\ncheck.plot_s_deg(1, 0, label='model')\nplt.ylabel('phase (rad)')\nplt.legend()\nplt.subplot(2,1,2)\nconn.plot_s_db(1, 0, label='Measured')\ncheck.plot_s_db(1, 0, label='Model')\nplt.xlabel('Frequency (GHz)')\nplt.ylabel('Insertion Loss (dB)')\nplt.legend()\nplt.show()\nExplanation: Relative permittivity $\\epsilon_{e,eff}$ and attenuation $A$ shows a reasonable agreement.\nA better agreement could be achieved by implementing the Kirschning and Jansen microstripline dispersion model or using a linear correction.\nConnectors effects estimation\nEnd of explanation\nDUT = m.line(d_line, 's', Z_line)\nDUT.name = 'model'\nCheck = m.thru() ** left ** DUT ** right ** m.thru()\nCheck.name = 'model with connectors'\nplt.figure()\nTL100.plot_s_db()\nCheck.plot_s_db(1,0, color='k')\nCheck.plot_s_db(0,0, color='k')\nplt.show()\nCheck_dc = Check.extrapolate_to_dc(kind='linear')\nplt.figure()\nplt.suptitle('Time domain step-response')\nax = plt.subplot(1,1,1)\nTL100_dc.s11.plot_z_time_step(pad=2000, window='hamming', label='Measured', ax=ax, color='k')\nCheck_dc.s11.plot_z_time_step(pad=2000, window='hamming', label='Model', ax=ax, color='b')\nt0 = -2\nt1 = 4\nax.set_xlim(t0, t1)\nax.xaxis.set_minor_locator(AutoMinorLocator(10))\nax.yaxis.set_minor_locator(AutoMinorLocator(5))\nax.patch.set_facecolor('1.0')\nax.grid(True, color='0.8', which='minor')\nax.grid(True, color='0.5', which='major')\nExplanation: Connector + thru plots shows a reasonable agreement between calibration results and model.\nFinal check\nEnd of explanation"}}},{"rowIdx":2133,"cells":{"Unnamed: 0":{"kind":"number","value":2133,"string":"2,133"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n BigO, Complexity, Time Complexity, Space Complexity, Algorithm Analysis\ncf. pp. 40 McDowell, 6th Ed. VI BigO\ncf. 2.2. What Is Algorithm Analysis?\nStep1: A good basic unit of computation for comparing the summation algorithms might be to count the number of assignment statements performed.\nStep3: cf. 2.4. An Anagram Detection Example\nStep4: 2.4.2. Sort and Compare Solution 2"},"code_prompt":{"kind":"string","value":"Python Code:\ndef sumOfN(n):\n theSum = 0\n for i in range(1,n+1):\n theSum = theSum + i\n \n return theSum\n \nprint(sumOfN(10))\ndef foo(tom):\n fred = 0 \n for bill in range(1,tom+1):\n barney = bill\n fred = fred + barney\n \n return fred\nprint(foo(10))\nimport time \ndef sumOfN2(n):\n start = time.time()\n \n theSum = 0 # 1 assignment \n for i in range(1,n+1):\n theSum = theSum + i # n assignments \n \n end = time.time()\n \n return theSum, end-start # (1 + n) assignements\nfor i in range(5):\n print(\"Sum is %d required %10.7f seconds \" % sumOfN2(10000) )\nfor i in range(5):\n print(\"Sum is %d required %10.7f seconds \" % sumOfN2(100000) )\nfor i in range(5):\n print(\"Sum is %d required %10.7f seconds \" % sumOfN2(1000000) )\ndef sumOfN3(n):\n start=time.time()\n theSum = (n*(n+1))/2\n end=time.time()\n return theSum, end-start\nprint(sumOfN3(10))\nfor i in range(5):\n print(\"Sum is %d required %10.7f seconds \" % sumOfN3(10000*10**(i)) )\nExplanation: BigO, Complexity, Time Complexity, Space Complexity, Algorithm Analysis\ncf. pp. 40 McDowell, 6th Ed. VI BigO\ncf. 2.2. What Is Algorithm Analysis?\nEnd of explanation\ndef findmin(X):\n start=time.time()\n minval= X[0]\n for ele in X:\n if minval > ele:\n minval = ele\n end=time.time()\n return minval, end-start\ndef findmin2(X):\n start=time.time()\n L = len(X)\n overallmin = X[0]\n for i in range(L):\n minval_i = X[i]\n for j in range(L):\n if minval_i > X[j]:\n minval_i = X[j]\n if overallmin > minval_i:\n overallmin = minval_i\n end=time.time()\n return overallmin, end-start\nimport random\nfor i in range(5):\n print(\"findmin is %d required %10.7f seconds\" % findmin( [random.randrange(1000000) for _ in range(10000*10**i)] ) )\nfor i in range(5):\n print(\"findmin2 is %d required %10.7f seconds\" % findmin2( [random.randrange(1000000) for _ in range(10000*10**i)] ) )\nExplanation: A good basic unit of computation for comparing the summation algorithms might be to count the number of assignment statements performed.\nEnd of explanation\ndef anagramSolution(s1,s2):\n @fn anagramSolution\n @details 1 string is an anagram of another if the 2nd is simply a rearrangement of the 1st\n 'heart' and 'earth' are anagrams\n 'python' and 'typhon' are anagrams\n \n A = list(s2) # Python strings are immutable, so make a list\n pos1 = 0 \n stillOK = True\n while pos1 < len(s1) and stillOK:\n pos2 = 0 \n found = False\n while pos2 < len(A) and not found:\n if s1[pos1] == A[pos2]: # given s1[pos1], try to find it in A, changing pos2\n found = True\n else:\n pos2 = pos2+1\n \n if found:\n A[pos2] = None\n else:\n stillOK = False\n pos1 = pos1 + 1\n return stillOK\nanagramSolution(\"heart\",\"earth\")\nanagramSolution(\"python\",\"typhon\")\nanagramSolution(\"anagram\",\"example\")\nExplanation: cf. 2.4. An Anagram Detection Example\nEnd of explanation\ndef anagramSolution2(s1,s2):\n \nExplanation: 2.4.2. Sort and Compare Solution 2\nEnd of explanation"}}},{"rowIdx":2134,"cells":{"Unnamed: 0":{"kind":"number","value":2134,"string":"2,134"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Problem Set 12\nFirst the exercises\nStep1: Let us load up a sample dataset.\nStep6: Now construct a KNN classifier\nStep7: Calculate accuracy on this very small subset.\nStep8: Let's time these different methods to see if the \"faster_preds\" is actually faster\nStep11: Okay now, let us try the clustering algorithm.\nStep12: Let us load the credit card dataset and extract a small dataframe of numerical features to test on.\nStep14: Now let us write our transformation function.\nStep15: Now let us build some simple loss functions for 1d labels.\nStep17: Now let us define the find split function.\nStep18: One hot encode our dataset\nStep20: Test this to see if it is reasonable\nStep21: Test this out.\nStep22: The naive option"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nimport pandas as pd\nimport keras\nfrom keras.datasets import mnist\nExplanation: Problem Set 12\nFirst the exercises:\n* Let $\\mu=\\frac{1}{|S|}\\sum_{x_i\\in S} x_i$ let us expand \n\\begin{align}\n\\sum_{x_i\\in S} ||x_i-\\mu||^2 &=\\sum_{x_i\\in S}(x_i-\\mu)^T(x_i-\\mu)\\\n &= |S|\\mu^T\\mu+\\sum_{x_i\\in S}\\left( x_i^Tx_i-2\\mu^T x_i \\right) \\\n &= \\frac{1}{|S|}\\left(\\sum_{(x_i,x_j)\\in S\\times S} x_i^T x_j\\right) + \\sum_{x_i\\in S} \\left( x_i^Tx_i-\\frac{2}{|S|}\\left(\\sum_{x_j\\in S} x_j^T x_i\\right)\\right)\\\n &= \\sum_{x_i\\in S} x_i^Tx_i-\\frac{1}{|S|}\\sum_{(x_i,x_j)\\in S\\times S} x_j^T x_i\\\n &= \\frac{1}{2}\\left(\\sum_{x_i\\in S} x_i^Tx_i-\\frac{2}{|S|}\\sum_{(x_i,x_j)\\in S\\times S} x_j^T x_i+\\sum_{x_j\\in S} x_j^Tx_j \\right)\\\n &= \\frac{1}{2|S|}\\left(\\sum_{(x_i,x_j)\\in S\\times S} x_i^Tx_i-2\\sum_{(x_i,x_j)\\in S\\times S} x_j^T x_i+\\sum_{(x_i,x_j)\\in S\\times S} x_j^Tx_j \\right)\\\n &= \\frac{1}{2|S|}\\sum_{(x_i,x_j)\\in S\\times S} (x_i-x_j)^T(x_i-x_j)\\\n &= \\frac{1}{2|S|}\\sum_{(x_i,x_j)\\in S\\times S} ||x_i-x_j||^2\n\\end{align}\nas desired.\n* So the $K$-means algorithm consists of iterations of two steps, we will show that either the algorithm has stabilized or that each of these steps decreases\n[ T=\\sum_{c=1}^K \\sum_{x_i\\in S_c}||x_i-\\mu_c||^2,] where $S_c$ is the $c$th cluster and $\\mu_c$ is the previously defined mean over that cluster. The sequence defined by these sums is therefore monotonically decreasing and bounded below so it will eventually approach the maximal lower bound. \nThe value of $\\mu$ that minimizes $\\sum_{x_i\\in S_c}||x_i-\\mu||^2$ is $\\frac{1}{|S_c|}\\sum_{x_i\\in S_c} x_i$ (we can check this by setting the derivative with respect to $\\mu$ to zero). So updating the mean estimates will never increase $T$. If we do not update the mean estimates than the cluster assignments will not change on the next step. \nThe next step maps samples to their closest mean which can only decrease the sum $T$.\nPython Lab\nNow let us load our standard libraries.\nEnd of explanation\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train.shape\nimport matplotlib.pyplot as plt\n%matplotlib inline\nrandix = np.random.randint(0,60000)\nplt.imshow(x_train[randix])\nprint(\"Label is {}.\".format(y_train[randix]))\nx_train_f = x_train.reshape(60000,-1)\nx_train_f.shape\nx_test_f = x_test.reshape(-1, 28**2)\nx_test_f.shape\nfrom sklearn.preprocessing import OneHotEncoder as OHE\nohe = OHE(sparse = False)\ny_train_ohe = ohe.fit_transform(y_train.reshape(-1,1))\ny_test_ohe = ohe.fit_transform(y_test.reshape(-1,1))\nnp.argmax(y_train_ohe[randix]) == y_train[randix]\nExplanation: Let us load up a sample dataset.\nEnd of explanation\nfrom scipy.spatial.distance import cdist\nfrom sklearn.neighbors import KDTree\nclass KNNClassifier(object):\n def fit(self,x,y,k=1,fun=lambda x: np.mean(x,axis=0)):\n Fits a KNN regressor.\n Args:\n x (numpy array) Array of samples indexed along first axis.\n y (numpy array) Array of corresponding labels.\n k (int) the number of neighbors\n fun (function numpy array --> desired output) Function to be applied to k-nearest\n neighbors for predictions\n \n self.x = x[:]\n self.y = y[:]\n self.k = k\n self.f = fun\n self.tree = KDTree(self.x)\n \n def predict_one(self, sample):\n \n Run prediction on sample\n \n Args:\n new_x (numpy array) sample\n \n \n dists = cdist(sample.reshape(1,-1),self.x)\n ix = np.argpartition(dists,self.k-1)[0,0:self.k]\n return self.f(self.y[ix])\n \n def predict(self, samples):\n \n Run predictions on list.\n Args:\n samples (numpy array) samples\n \n return np.array([self.predict_one(x) for x in samples])\n \n def faster_predict(self,samples):\n \n Run faster predictions on list.\n Args:\n samples (numpy array) samples\n \n _, ixs = self.tree.query(samples, k=self.k)\n #print(ixs)\n return np.array([self.f(self.y[ix]) for ix in ixs])\n \nclassifier = KNNClassifier()\nclassifier.fit(x_train_f, y_train_ohe, k=1)\npreds=classifier.predict(x_test_f[:500])\nExplanation: Now construct a KNN classifier\nEnd of explanation\nnp.mean(np.argmax(preds,axis=1)==y_test[:500])\nfaster_preds = classifier.faster_predict(x_test_f[:500])\nnp.mean(np.argmax(faster_preds,axis=1)==y_test[:500])\nExplanation: Calculate accuracy on this very small subset.\nEnd of explanation\nfrom timeit import default_timer as timer\nstart = timer()\nclassifier.predict(x_test_f[:500])\nend = timer()\nprint(end-start)\nstart = timer()\nclassifier.faster_predict(x_test_f[:500])\nend = timer()\nprint(end-start)\nExplanation: Let's time these different methods to see if the \"faster_preds\" is actually faster:\nEnd of explanation\ndef cluster_means(x,cluster_assignments,k):\n \n Return the new cluster means and the within cluster squared distance given the cluster assignments\n \n cluster_counter = np.zeros((k,1))\n cluster_means = np.zeros((k, x.shape[1]))\n for cluster, pt in zip(cluster_assignments, x):\n #print(x)\n cluster_means[cluster] += pt\n cluster_counter[cluster]+=1\n cluster_means = cluster_means/cluster_counter\n \n wcss = 0.\n for cluster, pt in zip(cluster_assignments, x):\n wcss+=np.sum((pt-cluster_means[cluster])**2)\n \n return cluster_means, wcss\nclass KMeansCluster(object):\n \n #Fit a clustering object on a dataset x consisting of samples on each row\n #by the K-means algorithm into k clusters\n def fit(self,x,k):\n \n Fit k-means clusterer\n \n Args:\n x (numpy array) samples\n k (int) number of clusters\n \n num_samples, num_features = x.shape[0], x.shape[1]\n #Randomly assign clusters\n cluster_assignments = np.random.randint(0,k,num_samples)\n \n #initialize\n cluster_mus = np.zeros((k,num_features))\n \n #update\n new_cluster_mus, wcss = cluster_means(x,cluster_assignments,k)\n count = 1\n while (cluster_mus!=new_cluster_mus).any() and count < 10**3:\n count += 1\n print(\"Iteration {:3d}, WCSS = {:10f}\".format(count,wcss),end=\"\\r\")\n cluster_mus = new_cluster_mus\n #calculate distances\n distances = cdist(x,cluster_mus, metric = 'sqeuclidean')\n np.argmin(distances, axis = 1, out = cluster_assignments)\n new_cluster_mus, wcss = cluster_means(x,cluster_assignments,k)\n self.cluster_means = cluster_means\n self.cluster_assignments = cluster_assignments\n self.x = x[:]\n self.wcss = wcss\nclusterer = KMeansCluster()\nclusterer.fit(x_train_f,10)\nclusterer2 = KMeansCluster()\nclusterer2.fit(x_train_f,10)\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_train, clusterer2.cluster_assignments)\ncluster_samples = clusterer2.x[clusterer2.cluster_assignments == 0]\nplt.imshow(cluster_samples[0].reshape(28,28))\nplt.imshow(cluster_samples[1].reshape(28,28))\nplt.imshow(cluster_samples[23].reshape(28,28))\nplt.imshow(cluster_samples[50].reshape(28,28))\nnp.mean(classifier.faster_predict(cluster_samples),axis=0)\nExplanation: Okay now, let us try the clustering algorithm.\nEnd of explanation\nbig_df = pd.read_csv(\"UCI_Credit_Card.csv\")\nbig_df.head()\nlen(big_df)\nlen(big_df.dropna())\ndf = big_df.drop(labels = ['ID'], axis = 1)\nlabels = df['default.payment.next.month']\ndf.drop('default.payment.next.month', axis = 1, inplace = True)\nnum_samples = 25000\ntrain_x, train_y = df[0:num_samples], labels[0:num_samples]\ntest_x, test_y = df[num_samples:], labels[num_samples:]\ntest_x.head()\ntrain_y.head()\nExplanation: Let us load the credit card dataset and extract a small dataframe of numerical features to test on.\nEnd of explanation\nclass bin_transformer(object):\n \n def __init__(self, df, num_quantiles = 2):\n #identify list of quantiles\n self.quantiles = df.quantile(np.linspace(1./num_quantiles, 1.-1./num_quantiles,num_quantiles-1))\n \n \n def transform(self, df):\n \n Args:\n df (pandas dataframe) : dataframe to transform\n \n Returns:\n new (pandas dataframe) : new dataframe where for every feature of the original there will be \n num_quantiles-1 features corresponding to whether or not the original values where greater\n than or equal to the corresponding quantile.\n fns (dictionary (string,float)) returns dictionary of quantiles\n \n \n new = pd.DataFrame()\n fns = {}\n for col_name in df.axes[1]:\n for ix, q in self.quantiles.iterrows():\n quart = q[col_name]\n new[col_name+str(ix)] = (df[col_name] >= quart)\n fn = quart\n fns[col_name+str(ix)] = [col_name, fn]\n return new, fns\ntransformer = bin_transformer(train_x,2)\ntrain_x_t, tr_fns = transformer.transform(train_x)\ntest_x_t, test_fns = transformer.transform(test_x)\ntrain_x_t.head()\nExplanation: Now let us write our transformation function.\nEnd of explanation\ndef bdd_cross_entropy(pred, label):\n return np.mean(-np.sum(label*np.log(pred+10**(-8)),axis=1))\ndef MSE(pred,label):\n return np.mean(np.sum((pred-label)**2, axis=1))\ndef acc(pred,label):\n return np.mean(np.argmax(pred,axis=1)==np.argmax(label, axis=1))\ndef SSE(x,y):\n return np.sum((x-y)**2)\ndef gini(x,y):\n return 1-np.sum(np.mean(y,axis=0)**2)\nExplanation: Now let us build some simple loss functions for 1d labels.\nEnd of explanation\ndef find_split(x, y, loss, verbose = False):\n \n Args:\n x (dataframe) : dataframe of boolean values\n y (dataframe (1 column)) : dataframe of labeled values\n loss (function: (yvalue, dataframe of labels)-->float) : calculates loss for prediction of yvalue\n for a dataframe of true values.\n verbose (bool) : whether or not to include debugging info\n \n min_ax = None\n N = x.shape[0]\n base_loss = loss(np.mean(y,axis=0),y)\n min_loss = base_loss\n for col_name in x.axes[1]:\n mask = x[col_name]\n num_pos = np.sum(mask)\n num_neg = N - num_pos\n if num_neg*num_pos == 0:\n continue\n pos_y = np.mean(y[mask], axis = 0)\n neg_y = np.mean(y[~mask], axis = 0)\n l = (num_pos*loss(pos_y, y[mask]) + num_neg*loss(neg_y, y[~mask]))/N\n if verbose:\n print(\"Column {0} split has improved loss {1}\".format(col_name, base_loss-l))\n if l < min_loss:\n min_loss = l\n min_ax = col_name\n return min_ax, min_loss, base_loss-min_loss\n \nExplanation: Now let us define the find split function.\nEnd of explanation\nohe = OHE(sparse = False)\ntrain_y_ohe = ohe.fit_transform(train_y.values.reshape(-1,1))\ntrain_y_ohe[0:5],train_y.values[0:5]\ntest_y_ohe = ohe.transform(test_y.values.reshape(-1,1))\nExplanation: One hot encode our dataset\nEnd of explanation\nfind_split(train_x_t, train_y_ohe, bdd_cross_entropy, verbose = False)\nnp.mean(train_y_ohe[train_x_t['LIMIT_BAL0.5']],axis=0)\nnp.mean(train_y_ohe[~train_x_t['LIMIT_BAL0.5']],axis = 0)\nnp.mean(train_y_ohe,axis=0)\n#Slow but simple\nclass decision_tree(object):\n def __init__(self):\n self.f = None\n \n def fit(self, x,y,depth=5,loss=MSE, minsize = 1, quintiles = 2, verbose = False):\n #Construct default function\n mu = np.mean(y, axis=0)\n \n self.f = lambda a: mu \n \n # Check our stopping criteria\n if(x.shape[0]<=minsize or depth == 0):\n return \n \n # transform our data\n tr = bin_transformer(x, quintiles)\n tr_x, fns = tr.transform(x)\n split, split_loss, improvement = find_split(tr_x,y,loss)\n \n if verbose:\n print(\"Improvement: {}\".format(improvement))\n #if no good split was found return\n if split == None:\n return\n \n # Build test function\n col_to_split = fns[split][0]\n splitter = lambda a: (a[col_to_split] >= fns[split][1])\n mask = tr_x[split]\n left = decision_tree()\n right = decision_tree()\n left.fit(x[~mask],y[~mask],depth-1,loss, minsize, quintiles)\n right.fit(x[mask],y[mask],depth-1,loss, minsize, quintiles)\n def g(z):\n if(splitter(z)):\n return right.f(z)\n else:\n return left.f(z)\n self.f = g\n def predict(self, x):\n \n Used for bulk prediction\n \n num_samples = x.shape[0]\n \n return np.array([self.f(x.iloc[ix,:]) for ix in range(num_samples)])\n \nExplanation: Test this to see if it is reasonable:\nEnd of explanation\ndt = decision_tree()\ndt.fit(train_x, train_y_ohe, loss = MSE, minsize = 1, depth = 6, quintiles = 50)\ndt.predict(test_x.iloc[0:3,:]), test_y_ohe[0:3]\npreds = dt.predict(train_x)\nnp.mean(np.argmax(preds, axis=1)==train_y)\nExplanation: Test this out.\nEnd of explanation\n1-np.mean(test_y)\nclass gradient_boosting_trees(object):\n \n def fit(self, x, y, depth = 2, quintiles = 10, num_trees = 10):\n self.forest = [None]*num_trees\n cur_y = y[:]\n \n for ix in range(num_trees):\n self.forest[ix] = decision_tree()\n self.forest[ix].fit(x, cur_y, loss=MSE, depth = depth, quintiles = quintiles, minsize = 1)\n \n preds = self.forest[ix].predict(x)\n cur_y = cur_y - preds\n \n def predict(self,x):\n s = 0.\n preds = [tree.predict(x) for tree in self.forest]\n for t in preds:\n s+=t\n return s\n \n \nforest = gradient_boosting_trees()\ntrain_y_ohe = ohe.fit_transform(train_y.values.reshape(-1,1))\nforest.fit(train_x, train_y_ohe, depth = 20, num_trees = 5, quintiles = 20)\nforest.predict(test_x.iloc[0:3,:]), test_y_ohe[0:3]\nfor_preds = forest.predict(train_x)\nfor_preds[0:5,:]\ntrain_y_ohe[0:3]\nnp.mean(np.argmax(for_preds, axis=1)==train_y)\nfor_preds = forest.predict(test_x)\nnp.mean(np.argmax(for_preds, axis=1)==test_y)\nfrom sklearn import tree\nsktree = tree.DecisionTreeClassifier(max_depth=20)\nsktree.fit(train_x, train_y_ohe)\nExplanation: The naive option:\nEnd of explanation"}}},{"rowIdx":2135,"cells":{"Unnamed: 0":{"kind":"number","value":2135,"string":"2,135"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Simulation Runs 3 – 16 based on experiment fits\n

Table of Contents

\nRun 3: Predict YFP synthesis rate of initiation mutants based on fit of stall strengths to single mutant data (for Fig 4, Fig. 4 supplement 1A–G)\nEnd of explanation\n%%writefile simulation_run_4.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run4/'))\nmrnafiles = ['../annotations/simulations/run4/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run4_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run4_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', \n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run4/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(30):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_4.py',\n str(index)\n ])\nExplanation: Run 4: Predict YFP synthesis rate of CTC, CTT double mutants based on fit of stall strengths to single mutant data (for Fig. 5 figure supplement 1A, 1B)\nEnd of explanation\n%%writefile simulation_run_5.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run5/'))\nmrnafiles = ['../annotations/simulations/run5/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run5_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run5_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run5_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv',\n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run5/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(20):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_5.py',\n str(index)\n ])\nExplanation: Run 5: Predict YFP synthesis rate of CTC distance mutants based on fit of stall strengths to single mutant data (for Fig. 6 figure supplement 1)\nEnd of explanation\n%%writefile simulation_run_14.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run14/'))\nmrnafiles = ['../annotations/simulations/run14/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run14_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run14_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run14_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation', \n '--trna-concn', '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', \n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run14/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(15):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_14.py',\n str(index)\n ])\nExplanation: Run 14: Predict YFP synthesis rate of serine initiation mutants based on fit of stall strengths to single mutant data (for Fig. 4 supplement 1H)\nEnd of explanation\n%%writefile simulation_run_15.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run15/'))\nmrnafiles = ['../annotations/simulations/run15/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run15_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run15_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run15_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', \n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run15/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(15):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_15.py',\n str(index)\n ])\nExplanation: Run 15: Predict YFP synthesis rate of serine double mutants based on fit of stall strengths to single mutant data (for Fig. 5 figure supplement 1C)\nEnd of explanation\n%%writefile simulation_run_16.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run16/'))\nmrnafiles = ['../annotations/simulations/run16/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run16_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run16_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run16_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', \n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run16/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(18):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_16.py',\n str(index)\n ])\nExplanation: Run 16: Predict YFP synthesis rate of CTA multiple mutants based on fit of stall strengths to single mutant data (for Fig. 5)\nEnd of explanation\n%%writefile simulation_run_6.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run6/'))\nmrnafiles = ['../annotations/simulations/run6/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv',\n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run6/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(8):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '10', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_6.py',\n str(index)\n ])\nExplanation: Run 6: Vary initiation rate systematically for 3 different models (for Fig. 3A)\nEnd of explanation\n%%writefile simulation_run_7.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run7/'))\nmrnafiles = ['../annotations/simulations/run7/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv',\n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run7/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(9):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_7.py',\n str(index)\n ])\nExplanation: Run 7: Vary number of stall sites systematically for 3 different models (for Fig. 3B)\nEnd of explanation\n%%writefile simulation_run_8.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run8/'))\nmrnafiles = ['../annotations/simulations/run8/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv',\n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run8/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(238):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '10', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_8.py',\n str(index)\n ])\nExplanation: Run 8: Vary distance between stall sites systematically for 3 different models (for Fig. 3C)\nEnd of explanation\n%%writefile simulation_run_9.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\nimport numpy as np\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = ['../annotations/simulations/run4/yfp_cta18_initiationrate_0.3.csv']\n# use experimental fits for stall strengths from run 4\nterminationandStallStrengths = [\n ('--5prime-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_5primepreterm.tsv'),\n ('--background-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'),\n ('--selective-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n for typeOfTermination, stallstrengthfile in terminationandStallStrengths:\n for terminationRate in [0] + list(10.0**np.arange(-2,1.01,0.05)):\n currentindex += 1\n if currentindex != jobindex:\n continue\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv',\n typeOfTermination, \n '%0.4g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run9/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(200):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '20', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_9.py',\n str(index)\n ])\nExplanation: Run 9 : Vary abortive termination rate systematically for 3 different models (for Fig. 7)\nEnd of explanation\n%%writefile simulation_run_11.py\n#!/usr/bin/env python\n#SBATCH --mem=8000\nimport subprocess as sp\nimport os\nimport sys\njobindex = int(sys.argv[1])\ncurrentindex = -1\nmrnafiles = list(filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run11/')))\nmrnafiles = ['../annotations/simulations/run11/' + File for File in mrnafiles]\nterminationandStallStrengths = [\n ('--5prime-preterm-rate',0,'../processeddata/simulations/run11_stallstrengthfits_trafficjam.tsv'),\n ('--5prime-preterm-rate',1,'../processeddata/simulations/run11_stallstrengthfits_5primepreterm.tsv'),\n ('--selective-preterm-rate',1,'../processeddata/simulations/run11_stallstrengthfits_selpreterm.tsv'),\n ]\nfor mrnafile in mrnafiles:\n currentindex += 1\n if currentindex != jobindex:\n continue\n for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths:\n cmd = ' '.join([\n './reporter_simulation',\n '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', \n typeOfTermination, \n '%0.2g'%terminationRate,\n '--threshold-accommodation-rate', '22',\n '--output-prefix','../rawdata/simulations/run11/',\n '--stall-strength-file', stallstrengthfile,\n '--input-genes', mrnafile\n ])\n sp.check_output(cmd, shell=True) \nimport subprocess as sp\n# loop submits each simulation to a different node of the cluster\nfor index in range(20):\n sp.check_output([\n 'sbatch', # for SLURM cluster; this line can be commented out if running locally\n '-t', '30', # for SLURM cluster; this line can be commented out if running locally\n '-n', '1', # for SLURM cluster; this line can be commented out if running locally\n 'simulation_run_11.py',\n str(index)\n ])\nExplanation: Run 11: Predict YFP synthesis rate of CTA distance mutants based on fit of stall strengths to single mutant data (for Fig. 6)\nEnd of explanation"}}},{"rowIdx":2136,"cells":{"Unnamed: 0":{"kind":"number","value":2136,"string":"2,136"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n The python Language Reference\nCPython -> Python implmentation in C
\nPython program is read by a parser, input to the parser is a stream of tokens, generated by lexical analyzer.
\n
    \n
  1. Logical Lines -> The end of a logical line is represented by NEWLINE
  2. \n
  3. Physical Lines -> It is a sequence of characters terminated by an end-of-line sequence
  4. \n
  5. Comments -> It starts with #\n
\nA comment in the first or second line of the form coding[=\nStep1: Everything is represented by objects in python or relation among objects. Every object has a value, type, identity. Identity can be thought of as address of object in memory which never changes. The 'is' operator compares the identity of the objects. Type of an object is also unchangable.
\nList of types available in python
    \n
  1. None -> Objects of type None have value None
  2. \n
  3. NotImplemented -> Same as None but it is returned when methods do not implement the operations on the operand
  4. \n
  5. Elipses -> Singled value accessed using ... or Ellipses. It contains int, bool, float(Real), complex
  6. \n
  7. Sequences -> We can use len() to find number of itemsin the sequencem, also support slicing. These are strings. Tuples, Bytes. ord() converts string to int, chr() converts char to int, str.encode() to convert str to bytes and bytes.decode() to convert bytes to str. Mutable sequences are Lists, Byte Arrays
  8. \n
  9. Set types -> They cannot be indexed by subscripts only iterated, unique with no repetitions. These are sets and frozen sets
  10. \n
  11. Mapping -> Dictionaries
  12. \n
\nUser definded functions\n
    \n
  • \\__doc\\__ -> The description of the object
  • \n
  • \\__name\\__ -> It tells the function's name
  • \n
  • \\__qualname\\__ -> It shows the path from a module's global scope to a class.
  • \n
  • \\__module\\__ -> name of module function was defined in
  • \n
  • \\__code\\__
  • \n
  • \\__globals\\__ -> reference to the global variables of a function
  • \n
  • \\__dict\\__ -> Namespace supporting arbitrary functions attributes
  • \n
  • \\__closure\\__ -> None or tuple of cells containing binding of function's free variables
  • \n
\n
\nInstance methods\n
    \n
  • \\__self\\__ -> It is the class instance object
  • \n
  • \\__doc\\__
  • \n
  • \\__name\\__
  • \n
  • \\__module\\__
  • \n
  • \\__bases\\__ -> Tuples containing the base classes
  • \n
\n
\nGenerator functions -> which uses yield. It return an iterator object which can be used to execute the body of the function.
\nasync def is called a coroutine function, which returns a coroutine object. It contains await, async with, async for.
\nasync def which uses yield is called asyncronous generator function, whose return object can be used in async for
\n

__Class Instances__\nA class instance has a namespace implemented as a dictionary where attribute references are first searched. When not found than instance's class has also a atrribute by that name. If not found than _self_ is checked and if still not found than \\__getattr\\__() is checked. Attribute assignments and deletions always update the instance's dictionary.<\\p>\nVarious internal types used internally by the interpreter\n

    \n
  1. Code objects -> Represent byte-complied execuatble Python code or bytecode. They contain no references to mutable objects.
  2. \n
  3. Frame objects -> Represent execution frames, occur in traceback objects. Support one method called frame.clear() which clears all references to local variables.
  4. \n
  5. Traceback objects -> Represent a stack trace of an exception. It occurs when an exception occurs
  6. \n
  7. Slice objects -> Slices of \\__getitem\\__() methods or by slice().
  8. \n
  9. Static method objects -> Wrapper around any other object. When a static method object is retrieved from a class or a class instance, the object actually returns a wrapper object.
  10. \n
  11. Class method objects -> A wrapper aroudn another classes or class instances.
  12. \n
\nSpecial method names\n
    \n
  1. object.\\__new\\__(cls[]) -> To create a new instance class cls
  2. \n
  3. object.\\__init\\__(self[]) -> A new instance when created by 1) then this it is called first before sending the control to the caller.
  4. \n
  5. object.\\__del\\__(self) -> To destroy the object
  6. \n
  7. object.\\__repr\\__(self) -> To compute the official string representation of an object.
  8. \n
\nBy default instances of classes have a dictioanry for attribute storage. This space consumption can become large when a large number of instances of a class are created. This default can be overridden by using \\__slots\\__ in a class definition. It reserves sapce for the declared vaariabels and prevents automatic creation of dict of each instance.
\nIf you want to dynamically declare new vairbales then add \\__dict\\__ to the sequence of strings in \\__slots\\__ declaration.\n__Metaclasses__
\nThe class creation process can be customized by passing the metaclass keyword argument in the class definition line or by inheriting from an existing class that included such ar argument."},"code_prompt":{"kind":"string","value":"Python Code:\ndef \\\nquicksort():\n pass\nExplanation: The python Language Reference\nCPython -> Python implmentation in C
\nPython program is read by a parser, input to the parser is a stream of tokens, generated by lexical analyzer.
\n
    \n
  1. Logical Lines -> The end of a logical line is represented by NEWLINE
  2. \n
  3. Physical Lines -> It is a sequence of characters terminated by an end-of-line sequence
  4. \n
  5. Comments -> It starts with #\n
\nA comment in the first or second line of the form coding[=:]\\s*([-\\w.]+) is processed as a encoding declaration.
\nTwo or more physical lines can be interpretted as logical lines by using backslash().
\nLines in parenthesis can be split without backslahes
\nEnd of explanation\nclass Meta(type):\n pass\nclass MyClass(metaclass = Meta):\n pass\nclass MySubclass(MyClass):\n pass\nExplanation: Everything is represented by objects in python or relation among objects. Every object has a value, type, identity. Identity can be thought of as address of object in memory which never changes. The 'is' operator compares the identity of the objects. Type of an object is also unchangable.
\nList of types available in python
    \n
  1. None -> Objects of type None have value None
  2. \n
  3. NotImplemented -> Same as None but it is returned when methods do not implement the operations on the operand
  4. \n
  5. Elipses -> Singled value accessed using ... or Ellipses. It contains int, bool, float(Real), complex
  6. \n
  7. Sequences -> We can use len() to find number of itemsin the sequencem, also support slicing. These are strings. Tuples, Bytes. ord() converts string to int, chr() converts char to int, str.encode() to convert str to bytes and bytes.decode() to convert bytes to str. Mutable sequences are Lists, Byte Arrays
  8. \n
  9. Set types -> They cannot be indexed by subscripts only iterated, unique with no repetitions. These are sets and frozen sets
  10. \n
  11. Mapping -> Dictionaries
  12. \n
\nUser definded functions\n
    \n
  • \\__doc\\__ -> The description of the object
  • \n
  • \\__name\\__ -> It tells the function's name
  • \n
  • \\__qualname\\__ -> It shows the path from a module's global scope to a class.
  • \n
  • \\__module\\__ -> name of module function was defined in
  • \n
  • \\__code\\__
  • \n
  • \\__globals\\__ -> reference to the global variables of a function
  • \n
  • \\__dict\\__ -> Namespace supporting arbitrary functions attributes
  • \n
  • \\__closure\\__ -> None or tuple of cells containing binding of function's free variables
  • \n
\n
\nInstance methods\n
    \n
  • \\__self\\__ -> It is the class instance object
  • \n
  • \\__doc\\__
  • \n
  • \\__name\\__
  • \n
  • \\__module\\__
  • \n
  • \\__bases\\__ -> Tuples containing the base classes
  • \n
\n
\nGenerator functions -> which uses yield. It return an iterator object which can be used to execute the body of the function.
\nasync def is called a coroutine function, which returns a coroutine object. It contains await, async with, async for.
\nasync def which uses yield is called asyncronous generator function, whose return object can be used in async for
\n

__Class Instances__\nA class instance has a namespace implemented as a dictionary where attribute references are first searched. When not found than instance's class has also a atrribute by that name. If not found than _self_ is checked and if still not found than \\__getattr\\__() is checked. Attribute assignments and deletions always update the instance's dictionary.<\\p>\nVarious internal types used internally by the interpreter\n

    \n
  1. Code objects -> Represent byte-complied execuatble Python code or bytecode. They contain no references to mutable objects.
  2. \n
  3. Frame objects -> Represent execution frames, occur in traceback objects. Support one method called frame.clear() which clears all references to local variables.
  4. \n
  5. Traceback objects -> Represent a stack trace of an exception. It occurs when an exception occurs
  6. \n
  7. Slice objects -> Slices of \\__getitem\\__() methods or by slice().
  8. \n
  9. Static method objects -> Wrapper around any other object. When a static method object is retrieved from a class or a class instance, the object actually returns a wrapper object.
  10. \n
  11. Class method objects -> A wrapper aroudn another classes or class instances.
  12. \n
\nSpecial method names\n
    \n
  1. object.\\__new\\__(cls[]) -> To create a new instance class cls
  2. \n
  3. object.\\__init\\__(self[]) -> A new instance when created by 1) then this it is called first before sending the control to the caller.
  4. \n
  5. object.\\__del\\__(self) -> To destroy the object
  6. \n
  7. object.\\__repr\\__(self) -> To compute the official string representation of an object.
  8. \n
\nBy default instances of classes have a dictioanry for attribute storage. This space consumption can become large when a large number of instances of a class are created. This default can be overridden by using \\__slots\\__ in a class definition. It reserves sapce for the declared vaariabels and prevents automatic creation of dict of each instance.
\nIf you want to dynamically declare new vairbales then add \\__dict\\__ to the sequence of strings in \\__slots\\__ declaration.\n__Metaclasses__
\nThe class creation process can be customized by passing the metaclass keyword argument in the class definition line or by inheriting from an existing class that included such ar argument.\nEnd of explanation"}}},{"rowIdx":2137,"cells":{"Unnamed: 0":{"kind":"number","value":2137,"string":"2,137"},"text_prompt":{"kind":"string","value":"\nGiven the following text problem statement, write Python code to implement the functionality described below in problem statement\nProblem:\nUsing Sklearn RFE to Select Features\n"},"code_prompt":{"kind":"string","value":"Python Code::\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.feature_selection import RFE\nrf = RandomForestRegressor(random_state=101)\nrfe = RFE(rf, n_features_to_select=8)\nrfe = rfe.fit(X_train, y_train)\npredictions = rfe.predict(X_test)\n#Print feature rankings\nfeature_rankings = pd.DataFrame({'feature_names':np.array(X_train.columns),'feature_ranking':rfe.ranking_})\nprint(feature_rankings)\n"}}},{"rowIdx":2138,"cells":{"Unnamed: 0":{"kind":"number","value":2138,"string":"2,138"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Evaluation des modèles pour l'extraction supercritique\nL'extraction supercritique est de plus en plus utilisée afin de retirer des matières organiques de différents liquides ou matrices solides. Cela est dû au fait que les fluides supercritiques ont des avantages non négligeables par rapport aux autres solvants, ils ont des caractèreistiques comprises entre celles des gaz et celles des solides. En changeant la température et la pression ils peuvent capter des composés différents, ils sont donc très efficaces. \nLe méchanisme de l'extraction supercritique est le suivant \nStep1: Ejemplo 2 funciona\nStep2: Fonction\nModelo Reverchon\nMathematical Modeling of Supercritical Extraction of Sage Oil\nStep6: Trabajo futuro\nRealizar modificaciones de los parametros para observar cómo afectan al comportamiento del modelo.\nRealizar un ejemplo de optimización de parámetros utilizando el modelo de Reverchon.\nReferencias\n[1] E. Reverchon, Mathematical modelling of supercritical extraction of sage oil, AIChE J. 42 (1996) 1765–1771.\nhttps"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nfrom scipy import integrate\nfrom matplotlib.pylab import *\nExplanation: Evaluation des modèles pour l'extraction supercritique\nL'extraction supercritique est de plus en plus utilisée afin de retirer des matières organiques de différents liquides ou matrices solides. Cela est dû au fait que les fluides supercritiques ont des avantages non négligeables par rapport aux autres solvants, ils ont des caractèreistiques comprises entre celles des gaz et celles des solides. En changeant la température et la pression ils peuvent capter des composés différents, ils sont donc très efficaces. \nLe méchanisme de l'extraction supercritique est le suivant : \n- Transport du fluide vers la particule, en premier lieu sur sa surface et en deuxième lieu a l'intérieur de la particule par diffusion\n- Dissolution du soluté avec le fluide supercritique \n- Transport du solvant de l'intérieur vers la surface de la particule \n- Transport du solvant et des solutés de la surface de la particule vers la masse du solvant \nA - Le modèle de Reverchon : \nAfin d'utiliser ce modèle, définissons les variables qui vont y être admises, ci-dessous la nomenclature du modèle :\n \nLe modèle : \nIl est basé sur l'intégration des bilans de masses différentielles tout le long de l'extraction, avec les hypothèses suivants : \n- L'écoulement piston existe à l'intérieur du lit, comme le montre le schéma ci-contre : \n- La dispersion axiale du lit est négligeable\n- Le débit, la température et la pression sont constants\nCela nous permet d'obtenir les équations suivantes :\n- $uV.\\frac{\\partial c_{c}}{\\partial t}+eV.\\frac{\\partial c_{c}}{\\partial t}+ AK(q-q) = 0$\n- $(1-e).V.uV\\frac{\\partial c_{q}}{\\partial t}= -AK(q-q*)$\nLes conditions initiales sont les suivantes : C = 0, q=q0 à t = 0 et c(0,t) à h=0\nLa phase d'équilibre est : $c = k.q*$\nSachant que le fluide et la phase sont uniformes à chaque stage, nous pouvons définir le modèle en utilisant les équations différentielles ordinaires (2n). Les équations sont les suivantes :\n- $(\\frac{W}{p}).(Cn- Cn-1) + e (\\frac{v}{n}).(\\frac{dcn}{dt})+(1-e).(\\frac{v}{n}).(\\frac{dcn}{dt}) = 0$\n- $(\\frac{dqn}{dt} = - (\\frac{1}{ti})(qn-qn*)$\n- Les conditions initiales sont : cn = 0, qn = q0 à t = 0 \nEjemplo ODE\nEnd of explanation\nimport numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\ndef vdp1(t, y):\n return np.array([y[1], (1 - y[0]**2)*y[1] - y[0]])\nt0, t1 = 0, 20 # start and end\nt = np.linspace(t0, t1, 100) # the points of evaluation of solution\ny0 = [2, 0] # initial value\ny = np.zeros((len(t), len(y0))) # array for solution\ny[0, :] = y0\nr = integrate.ode(vdp1).set_integrator(\"dopri5\") # choice of method\nr.set_initial_value(y0, t0) # initial values\nfor i in range(1, t.size):\n y[i, :] = r.integrate(t[i]) # get one more value, add it to the array\n if not r.successful():\n raise RuntimeError(\"Could not integrate\")\nplt.plot(t, y)\nplt.show()\nExplanation: Ejemplo 2 funciona\nEnd of explanation\nP = 9 #MPa\nT = 323 # K\nQ = 8.83 #g/min\ne = 0.4\nrho = 285 #kg/m3\nmiu = 2.31e-5 # Pa*s\ndp = 0.75e-3 # m\nDl = 0.24e-5 #m2/s\nDe = 8.48e-12 # m2/s\nDi = 6e-13\nu = 0.455e-3 #m/s\nkf = 1.91e-5 #m/s\nde = 0.06 # m\nW = 0.160 # kg\nkp = 0.2\nr = 0.31 #m\nn = 10\nV = 12\n#C = kp * qE\nC = 0.1\nqE = C / kp\nCn = 0.05\nCm = 0.02\nt = np.linspace(0,10, 1)\nti = (r ** 2) / (15 * Di)\ndef reverchon(x,t):\n \n #Ecuaciones diferenciales del modelo Reverchon \n #dCdt = - (n/(e * V)) * (W * (Cn - Cm) / rho + (1 - e) * V * dqdt)\n #dqdt = - (1 / ti) * (q - qE)\n \n q = x[0]\n C = x[1]\n qE = C / kp\n dqdt = - (1 / ti) * (q - qE)\n dCdt = - (n/(e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n \n return [dqdt, dCdt] \nreverchon([1, 2], 0)\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\nresultado = odeint(reverchon, x0, t)\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\nresultado = odeint(reverchon, x0, t)\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, qR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C solid–fluid interface [=] $kg/m^3$\")\nprint(CR)\nr = 0.31 #m\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\nresultado = odeint(reverchon, x0, t)\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nr = 0.231 #m\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\nresultado = odeint(reverchon, x0, t)\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nfig,axes=plt.subplots(2,2)\naxes[0,0].plot(t,CR)\naxes[1,0].plot(t,qR)\nExplanation: Fonction\nModelo Reverchon\nMathematical Modeling of Supercritical Extraction of Sage Oil\nEnd of explanation\n#Datos experimentales\nx_data = np.linspace(0,9,10)\ny_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])\ndef f(y, t, k): \n sistema de ecuaciones diferenciales ordinarias \n return (-k[0]*y[0], k[0]*y[0]-k[1]*y[1], k[1]*y[1])\ndef my_ls_func(x,teta):\n f2 = lambda y, t: f(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n r = integrate.odeint(f2, y0, x)\n return r[:,1]\ndef f_resid(p):\n # definir la función de minimos cuadrados para cada valor de y\n \n return y_data - my_ls_func(x_data,p)\n#resolver el problema de optimización\nguess = [0.2, 0.3] #valores inicales para los parámetros\ny0 = [1,0,0] #valores inciales para el sistema de ODEs\n(c, kvg) = optimize.leastsq(f_resid, guess) #get params\nprint(\"parameter values are \",c)\n# interpolar los valores de las ODEs usando splines\nxeval = np.linspace(min(x_data), max(x_data),30) \ngls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)\nxeval = np.linspace(min(x_data), max(x_data), 200)\n#Gráficar los resultados\npp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')\npp.xlabel('t [=] min',{\"fontsize\":16})\npp.ylabel(\"C\",{\"fontsize\":16})\npp.legend(('Datos','Modelo'),loc=0)\npp.show()\nf_resid(guess)\n#Datos experimentales\nx_data = np.linspace(0,9,10)\ny_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])\nprint(y_data)\n# def f(y, t, k): \n# sistema de ecuaciones diferenciales ordinarias \n \n# return (-k[0]*y[0], k[0]*y[0]-k[1]*y[1], k[1]*y[1])\ndef reverchon(x,t,Di):\n \n #Ecuaciones diferenciales del modelo Reverchon \n #dCdt = - (n/(e * V)) * (W * (Cn - Cm) / rho + (1 - e) * V * dqdt)\n #dqdt = - (1 / ti) * (q - qE)\n \n q = x[0]\n C = x[1]\n qE = C / kp\n ti = (r**2) / (15 * Di)\n dqdt = - (1 / ti) * (q - qE)\n dCdt = - (n/(e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n \n return [dqdt, dCdt] \ndef my_ls_func(x,teta):\n f2 = lambda y, t: reverchon(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n rr = integrate.odeint(f2, y0, x)\n print(f2)\n return rr[:,1]\ndef f_resid(p):\n # definir la función de minimos cuadrados para cada valor de y\n \n return y_data - my_ls_func(p,x_data)\n#resolver el problema de optimización\nguess = np.array([0.2]) #valores inicales para los parámetros\ny0 = [0,0] #valores inciales para el sistema de ODEs\n(c, kvg) = optimize.leastsq(f_resid, guess) #get params\nprint(\"parameter values are \",c)\n# interpolar los valores de las ODEs usando splines\nxeval = np.linspace(min(x_data), max(x_data),30) \ngls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)\nxeval = np.linspace(min(x_data), max(x_data), 200)\n#Gráficar los resultados\npp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')\npp.xlabel('t [=] min',{\"fontsize\":16})\npp.ylabel(\"C\",{\"fontsize\":16})\npp.legend(('Datos','Modelo'),loc=0)\npp.show()\ndef my_ls_func(x,teta):\n f2 = lambda y, t: reverchon(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n r = integrate.odeint(f2, y0, x)\n print(f2)\n return r[:,1]\nmy_ls_func(y0,guess)\nf_resid(guess)\nExplanation: Trabajo futuro\nRealizar modificaciones de los parametros para observar cómo afectan al comportamiento del modelo.\nRealizar un ejemplo de optimización de parámetros utilizando el modelo de Reverchon.\nReferencias\n[1] E. Reverchon, Mathematical modelling of supercritical extraction of sage oil, AIChE J. 42 (1996) 1765–1771.\nhttps://onlinelibrary.wiley.com/doi/pdf/10.1002/aic.690420627\n[2] Amit Rai, Kumargaurao D.Punase, Bikash Mohanty, Ravindra Bhargava, Evaluation of models for supercritical fluid extraction, International Journal of Heat and Mass Transfer Volume 72, May 2014, Pages 274-287. https://www.sciencedirect.com/science/article/pii/S0017931014000398\nAjuste de parámetros con ODEs: modelo Reverchon\nExplicaciones : \n- Poner los datos experimentales \n- Definir las ecuaciones diferenciales ordinarias del systema con los diferentes parametros\n- Calcular el valor de la ecuacion diferencial en cada punto, se necesita una otra funcion para integrar la ecuacion \n- Despues tenemos que definir una funcion de minimos cuadrados para cada valor de y : minimos cuadrados es una tecnica de analisis numerico enmarcada dentro de la optimizacion matematica y se intenta encontrar la funcion continua entre los variables independentes y dependentes \n- Para resolverlo se necesita las varoles iniciales para los parametros y los ecuacions ordinarias, para obtener los paramètros de la funcion. Despues se necesita hacer una interpolacion para los valores de las ODEs y para hacerlo se usa splines (spline es una funcion definida per partes por los polynomios), en Python splines es un método que se usa cuando hay problemas de interpolacion.\nEnd of explanation"}}},{"rowIdx":2139,"cells":{"Unnamed: 0":{"kind":"number","value":2139,"string":"2,139"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Title\nStep1: Load Iris Data\nStep2: Create Random Forest Classifier\nStep3: Train Random Forest Classifier\nStep4: Predict Previously Unseen Observation"},"code_prompt":{"kind":"string","value":"Python Code:\n# Load libraries\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import datasets\nExplanation: Title: Random Forest Classifier\nSlug: random_forest_classifier\nSummary: Training a random forest classifier in scikit-learn.\nDate: 2017-09-21 12:00\nCategory: Machine Learning\nTags: Trees And Forests\nAuthors: Chris Albon \nPreliminaries\nEnd of explanation\n# Load data\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nExplanation: Load Iris Data\nEnd of explanation\n# Create random forest classifer object that uses entropy\nclf = RandomForestClassifier(criterion='entropy', random_state=0, n_jobs=-1)\nExplanation: Create Random Forest Classifier\nEnd of explanation\n# Train model\nmodel = clf.fit(X, y)\nExplanation: Train Random Forest Classifier\nEnd of explanation\n# Make new observation\nobservation = [[ 5, 4, 3, 2]]\n \n# Predict observation's class \nmodel.predict(observation)\nExplanation: Predict Previously Unseen Observation\nEnd of explanation"}}},{"rowIdx":2140,"cells":{"Unnamed: 0":{"kind":"number","value":2140,"string":"2,140"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Working with Multidimensional Coordinates\nAuthor\nStep1: As an example, consider this dataset from the xarray-data repository.\nStep2: In this example, the logical coordinates are x and y, while the physical coordinates are xc and yc, which represent the latitudes and longitude of the data.\nStep3: Plotting\nLet's examine these coordinate variables by plotting them.\nStep4: Note that the variables xc (longitude) and yc (latitude) are two-dimensional scalar fields.\nIf we try to plot the data variable Tair, by default we get the logical coordinates.\nStep5: In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply cartopy map projections.\nStep6: Multidimensional Groupby\nThe above example allowed us to visualize the data on a regular latitude-longitude grid. But what if we want to do a calculation that involves grouping over one of these physical coordinates (rather than the logical coordinates), for example, calculating the mean temperature at each latitude. This can be achieved using xarray's groupby function, which accepts multidimensional variables. By default, groupby will use every unique value in the variable, which is probably not what we want. Instead, we can use the groupby_bins function to specify the output coordinates of the group."},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport cartopy.crs as ccrs\nfrom matplotlib import pyplot as plt\nExplanation: Working with Multidimensional Coordinates\nAuthor: Ryan Abernathey\nMany datasets have physical coordinates which differ from their logical coordinates. Xarray provides several ways to plot and analyze such datasets.\nEnd of explanation\nds = xr.tutorial.open_dataset('rasm').load()\nds\nExplanation: As an example, consider this dataset from the xarray-data repository.\nEnd of explanation\nprint(ds.xc.attrs)\nprint(ds.yc.attrs)\nExplanation: In this example, the logical coordinates are x and y, while the physical coordinates are xc and yc, which represent the latitudes and longitude of the data.\nEnd of explanation\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(14,4))\nds.xc.plot(ax=ax1)\nds.yc.plot(ax=ax2)\nExplanation: Plotting\nLet's examine these coordinate variables by plotting them.\nEnd of explanation\nds.Tair[0].plot()\nExplanation: Note that the variables xc (longitude) and yc (latitude) are two-dimensional scalar fields.\nIf we try to plot the data variable Tair, by default we get the logical coordinates.\nEnd of explanation\nplt.figure(figsize=(14,6))\nax = plt.axes(projection=ccrs.PlateCarree())\nax.set_global()\nds.Tair[0].plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(), x='xc', y='yc', add_colorbar=False)\nax.coastlines()\nax.set_ylim([0,90]);\nExplanation: In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply cartopy map projections.\nEnd of explanation\n# define two-degree wide latitude bins\nlat_bins = np.arange(0,91,2)\n# define a label for each bin corresponding to the central latitude\nlat_center = np.arange(1,90,2)\n# group according to those bins and take the mean\nTair_lat_mean = ds.Tair.groupby_bins('xc', lat_bins, labels=lat_center).mean(dim=xr.ALL_DIMS)\n# plot the result\nTair_lat_mean.plot()\nExplanation: Multidimensional Groupby\nThe above example allowed us to visualize the data on a regular latitude-longitude grid. But what if we want to do a calculation that involves grouping over one of these physical coordinates (rather than the logical coordinates), for example, calculating the mean temperature at each latitude. This can be achieved using xarray's groupby function, which accepts multidimensional variables. By default, groupby will use every unique value in the variable, which is probably not what we want. Instead, we can use the groupby_bins function to specify the output coordinates of the group.\nEnd of explanation"}}},{"rowIdx":2141,"cells":{"Unnamed: 0":{"kind":"number","value":2141,"string":"2,141"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\nStep1: Network Traffic Forecasting (using time series data)\nIn telco, accurately forecasting KPIs (e.g. network traffic, utilizations, user experience, etc.) for communication networks ( 2G/3G/4G/5G/wired) can help predict network failures, allocate resource, or save energy. \nIn this notebook, we demonstrate a reference use case where we use the network traffic KPI(s) in the past to predict traffic KPI(s) in the future. We demostrate how to do multivariate multistep forecasting using Project Chronos.\nFor demonstration, we use the publicly available network traffic data repository maintained by the WIDE project and in particular, the network traffic traces aggregated every 2 hours (i.e. AverageRate in Mbps/Gbps and Total Bytes) in year 2018 and 2019 at the transit link of WIDE to the upstream ISP (dataset link). \nHelper functions\nThis section defines some helper functions to be used in the following procedures. You can refer to it later when they're used.\nStep2: Step 0\nStep3: Visualize the target KPIs\nStep4: Step 1\nStep5: Initialize train, valid and test tsdataset from raw pandas dataframe.\nStep6: Preprocess the datasets. Here we perform\nStep7: Convert TSDataset to numpy.\nStep8: Step 2\nStep9: You can use this method to print the parameter list.\nStep10: After training is finished. You can use the forecaster to do prediction and evaluation.\nStep11: Since we have used standard scaler to scale the input data (including the target values), we need to inverse the scaling on the predicted values too.\nStep12: Calculate mean square error and the symetric mean absolute percentage error.\nStep13: You may save & restore the forecaster.\nStep14: If you only want to save the pytorch model\nStep15: Visualization\nPlot actual and prediction values for AvgRate KPI\nStep16: Plot actual and prediction values for total bytes KPI"},"code_prompt":{"kind":"string","value":"Python Code:\nimport warnings\nwarnings.filterwarnings('ignore')\nimport matplotlib.pyplot as plt\n%matplotlib inline\ndef plot_predict_actual_values(date, y_pred, y_test, ylabel):\n \n plot the predicted values and actual values (for the test data)\n \n fig, axs = plt.subplots(figsize=(12,5))\n axs.plot(date, y_pred, color='red', label='predicted values')\n axs.plot(date, y_test, color='blue', label='actual values')\n axs.set_title('the predicted values and actual values (for the test data)')\n plt.xlabel('test datetime')\n plt.ylabel(ylabel)\n plt.legend(loc='upper left')\n plt.show()\nExplanation: Network Traffic Forecasting (using time series data)\nIn telco, accurately forecasting KPIs (e.g. network traffic, utilizations, user experience, etc.) for communication networks ( 2G/3G/4G/5G/wired) can help predict network failures, allocate resource, or save energy. \nIn this notebook, we demonstrate a reference use case where we use the network traffic KPI(s) in the past to predict traffic KPI(s) in the future. We demostrate how to do multivariate multistep forecasting using Project Chronos.\nFor demonstration, we use the publicly available network traffic data repository maintained by the WIDE project and in particular, the network traffic traces aggregated every 2 hours (i.e. AverageRate in Mbps/Gbps and Total Bytes) in year 2018 and 2019 at the transit link of WIDE to the upstream ISP (dataset link). \nHelper functions\nThis section defines some helper functions to be used in the following procedures. You can refer to it later when they're used.\nEnd of explanation\nfrom bigdl.chronos.data.utils.public_dataset import PublicDataset\ndf = PublicDataset(name='network_traffic', \n path='~/.chronos/dataset', \n redownload=False).get_public_data().preprocess_network_traffic().df\ndf.head()\nExplanation: Step 0: Prepare dataset\nChronos has provided built-in dataset APIs for easily download and preprocessed public dataset. You could find API guide here.\nWith below APIs, we first download network traffic data and then preprocess the downloaded dataset. The pre-processing mainly contains 2 parts:\nConvert \"StartTime\" values from string to Pandas TimeStamp\nUnify the measurement scale for \"AvgRate\" values - some uses Mbps, some uses Gbps\nEnd of explanation\nax = df.plot(y='AvgRate', figsize=(16,6), title=\"AvgRate of network traffic data\")\nax = df.plot(y='total', figsize=(16,6), title='total bytes of network traffic data')\nExplanation: Visualize the target KPIs: \"AvgRate\" and \"total\"\nEnd of explanation\nfrom bigdl.chronos.data import TSDataset\nfrom sklearn.preprocessing import StandardScaler\nExplanation: Step 1: Data transformation and feature engineering using Chronos TSDataset\nTSDataset is our abstract of time series dataset for data transformation and feature engineering. Here we use it to preprocess the data.\nEnd of explanation\ntsdata_train, _, tsdata_test = TSDataset.from_pandas(df, dt_col=\"StartTime\", target_col=[\"AvgRate\", \"total\"], with_split=True, test_ratio=0.1)\nExplanation: Initialize train, valid and test tsdataset from raw pandas dataframe.\nEnd of explanation\nlook_back = 84\nhorizon = 12\nstandard_scaler = StandardScaler()\nfor tsdata in [tsdata_train, tsdata_test]:\n tsdata.gen_dt_feature(features=[\"HOUR\", \"WEEKDAY\"], one_hot_features=[\"HOUR\", \"WEEKDAY\"])\\\n .impute(mode=\"last\")\\\n .scale(standard_scaler, fit=(tsdata is tsdata_train))\\\n .roll(lookback=look_back, horizon=horizon)\nExplanation: Preprocess the datasets. Here we perform:\ngen_dt_feature: generate feature from datetime (e.g. month, day...)\nimpute: fill the missing values\nscale: scale each feature to standard distribution.\nroll: sample the data with sliding window.\nFor forecasting task, we will look back 1 weeks' historical data (84 records with sample frequency of 2h) and predict the value of next 1 day (12 records).\nWe perform the same transformation processes on train and test set.\nEnd of explanation\nx_train, y_train = tsdata_train.to_numpy()\nx_test, y_test = tsdata_test.to_numpy()\n#x.shape = (num of sample, lookback, num of input feature)\n#y.shape = (num of sample, horizon, num of output feature)\nx_train.shape, y_train.shape, x_test.shape, y_test.shape\nExplanation: Convert TSDataset to numpy.\nEnd of explanation\nfrom bigdl.chronos.forecaster.tcn_forecaster import TCNForecaster\nforecaster = TCNForecaster(past_seq_len = look_back,\n future_seq_len = horizon,\n input_feature_num = x_train.shape[-1],\n output_feature_num = 2, # \"AvgRate\" and \"total\"\n num_channels = [30] * 7,\n repo_initialization = False,\n kernel_size = 3, \n dropout = 0.1, \n lr = 0.001,\n seed = 0)\nforecaster.num_processes = 1\nExplanation: Step 2: Time series forecasting using Chronos Forecaster\nWe demonstrate how to use chronos TCNForecaster for multi-variate and multi-step forecasting. For more details, you can refer to TCNForecaster document here.\nFirst, we initialize a forecaster.\n* num_channels: The filter numbers of the convolutional layers. It can be a list.\n* kernel_size: Convolutional layer filter height.\nEnd of explanation\nforecaster.data_config, forecaster.model_config\n%%time\nforecaster.fit((x_train, y_train), epochs=20, batch_size=64)\nExplanation: You can use this method to print the parameter list.\nEnd of explanation\n# make prediction\ny_pred = forecaster.predict(x_test)\nExplanation: After training is finished. You can use the forecaster to do prediction and evaluation.\nEnd of explanation\ny_pred_unscale = tsdata_test.unscale_numpy(y_pred)\ny_test_unscale = tsdata_test.unscale_numpy(y_test)\nExplanation: Since we have used standard scaler to scale the input data (including the target values), we need to inverse the scaling on the predicted values too.\nEnd of explanation\n# evaluate with mse, smape\nfrom bigdl.orca.automl.metrics import Evaluator\navgrate_mse = Evaluator.evaluate(\"mse\", y_test_unscale[:, :, 0], y_pred_unscale[:, :, 0], multioutput='uniform_average')\navgrate_smape = Evaluator.evaluate(\"smape\", y_test_unscale[:, :, 0], y_pred_unscale[:, :, 0], multioutput='uniform_average')\ntotal_mse = Evaluator.evaluate(\"mse\", y_test_unscale[:, :, 1], y_pred_unscale[:, :, 1], multioutput='uniform_average')\ntotal_smape = Evaluator.evaluate(\"smape\", y_test_unscale[:, :, 1], y_pred_unscale[:, :, 1], multioutput='uniform_average')\nprint(f\"Evaluation result for AvgRate: mean squared error is {'%.2f' % avgrate_mse}, sMAPE is {'%.2f' % avgrate_smape}\")\nprint(f\"Evaluation result for total: mean squared error is {'%.2f' % total_mse}, sMAPE is {'%.2f' % total_smape}\")\nExplanation: Calculate mean square error and the symetric mean absolute percentage error.\nEnd of explanation\nforecaster.save(\"network_traffic.fxt\")\nforecaster.load(\"network_traffic.fxt\")\nExplanation: You may save & restore the forecaster.\nEnd of explanation\nmodel = forecaster.get_model()\nimport torch\ntorch.save(model, \"tcn.pt\")\nExplanation: If you only want to save the pytorch model\nEnd of explanation\ntest_date=df[-y_pred_unscale.shape[0]:].index\n# You can choose the number of painting steps by specifying the step by yourself.\nstep = 0 # the first step\ntarget_name = \"AvgRate\"\ntarget_index = 0\nplot_predict_actual_values(date=test_date, y_pred=y_pred_unscale[:, step, target_index], y_test=y_test_unscale[:, step, target_index], ylabel=target_name)\nExplanation: Visualization\nPlot actual and prediction values for AvgRate KPI\nEnd of explanation\ntarget_name = \"total\"\ntarget_index = 1\nplot_predict_actual_values(date=test_date, y_pred=y_pred_unscale[:, step, target_index], y_test=y_test_unscale[:, step, target_index], ylabel=target_name)\nExplanation: Plot actual and prediction values for total bytes KPI\nEnd of explanation"}}},{"rowIdx":2142,"cells":{"Unnamed: 0":{"kind":"number","value":2142,"string":"2,142"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Harvesting WMS into CKAN\nThis notebook illustrates harvesting of a WMS endpoint into a CKAN instance.\nContext\nThe harvested WMS endpoint belongs to Landgate's Spatial Land Information Program (SLIP). The layers within are authored by partner agencies or Landgate. There are one or several different web service endpoints per WMS layer.\nOrganisations\nFrom a spreadsheet of agency references, names, and further information, CKAN organisations are initially created and subsequently used as owners of the respective harvested WMS layers.\nTopics\nThe WMS layers are organised by topics, which will be created both as CKAN groups and keywords. Harvested datasets will be allocated to releveant CKAN groups.\nLayer names\nThe WMS layer names contain the layer ID, consisting of agancy slug and layer reference, and the publishing date, and will be split up during harvesting.\nAdditional resources\nAdditional web service end points, as well as a list of published PDFs with further information, are added as extra resources to the CKAN datasets from harvested WMS layers.\nCKAN credentials\nSensitive information and related configuration, such as CKAN URLs and credentials, are stored in a separate file.\nTo use this workbook on your own CKAN instance, write the following contents into a file secret.py in the same directory as this workbook\nStep1: OGC W*S endpoints\nStep2: Additional Lookups\nStep3: Create Organisations and Groups\nThe next step will create or update CKAN organisations from organisations.csv, and CKAN groups from WMS topics.\nStep4: Prepare data\nThe following step will prepare a dictionary of dataset metadata, ready to be inserted into CKAN. \nIt parses the WMS endpoint and looks up dictionaries organisations, groups, and pdf_dict.\nThis step runs very quickly, as it only handles dictionaries of WMS layers, organisations and groups (both\nStep5: Delete old datasets\nNote\nStep6: Update datasets in CKAN\nFirst pass\nStep7: Second pass"},"code_prompt":{"kind":"string","value":"Python Code:\nimport ckanapi\nfrom harvest_helpers import *\nfrom secret import CKAN, SOURCES\n## enable one of:\n#ckan = ckanapi.RemoteCKAN(CKAN[\"ct\"][\"url\"], apikey=CKAN[\"ct\"][\"key\"])\n#ckan = ckanapi.RemoteCKAN(CKAN[\"ca\"][\"url\"], apikey=CKAN[\"ca\"][\"key\"])\nckan = ckanapi.RemoteCKAN(CKAN[\"cb\"][\"url\"], apikey=CKAN[\"cb\"][\"key\"])\nprint(\"Using CKAN {0}\".format(ckan.address))\nExplanation: Harvesting WMS into CKAN\nThis notebook illustrates harvesting of a WMS endpoint into a CKAN instance.\nContext\nThe harvested WMS endpoint belongs to Landgate's Spatial Land Information Program (SLIP). The layers within are authored by partner agencies or Landgate. There are one or several different web service endpoints per WMS layer.\nOrganisations\nFrom a spreadsheet of agency references, names, and further information, CKAN organisations are initially created and subsequently used as owners of the respective harvested WMS layers.\nTopics\nThe WMS layers are organised by topics, which will be created both as CKAN groups and keywords. Harvested datasets will be allocated to releveant CKAN groups.\nLayer names\nThe WMS layer names contain the layer ID, consisting of agancy slug and layer reference, and the publishing date, and will be split up during harvesting.\nAdditional resources\nAdditional web service end points, as well as a list of published PDFs with further information, are added as extra resources to the CKAN datasets from harvested WMS layers.\nCKAN credentials\nSensitive information and related configuration, such as CKAN URLs and credentials, are stored in a separate file.\nTo use this workbook on your own CKAN instance, write the following contents into a file secret.py in the same directory as this workbook:\n```\nCKAN = {\n \"ca\":{\n \"url\": \"http://catalogue.alpha.data.wa.gov.au/\",\n \"key\": \"your-api-key\" \n },\n \"cb\":{\n \"url\": \"http://catalogue.beta.data.wa.gov.au/\",\n \"key\": \"your-api-key\" \n }\n}\nSOURCES = {\n \"NAME\": {\n \"proxy\": \"proxy_url\",\n \"url\": \"https://www2.landgate.wa.gov.au/ows/wmspublic\"\n },\n ...\n}\nARCGIS = {\n \"SLIPFUTURE\" : {\n \"url\": \"http://services.slip.wa.gov.au/arcgis/rest/services\",\n \"folders\": [\"QC\", ...]\n },\n ...\n}\n``\nInsert your catalogue names, urls, and importantly, your write-permitted CKAN API keys.\nNext we'll import the whole dictionaryCKAN`.\nEnd of explanation\nwmsP = WebMapService(SOURCES[\"wmspublic\"][\"proxy\"])\nwmsP_url = SOURCES[\"wmspublic\"][\"url\"]\nwmsCM = WebMapService(SOURCES[\"wmsCsMosaic\"][\"proxy\"])\nwmsCM_url = SOURCES[\"wmsCsMosaic\"][\"url\"]\nwmsCC = WebMapService(SOURCES[\"wmsCsCadastre\"][\"proxy\"])\nwmsCC_url = SOURCES[\"wmsCsCadastre\"][\"url\"]\nwfsP = WebFeatureService(SOURCES[\"wfspublic_4326\"][\"proxy\"])\nwfsP_url = SOURCES[\"wfspublic_4326\"][\"url\"]\nwfsCA = WebFeatureService(SOURCES[\"wfsCsAdmin_4283\"][\"proxy\"])\nwfsCA_url = SOURCES[\"wfsCsAdmin_4283\"][\"url\"]\nwfsCC = WebFeatureService(SOURCES[\"wfsCsCadastre_4283\"][\"proxy\"])\nwfsCC_url = SOURCES[\"wfsCsCadastre_4283\"][\"url\"]\n#wfsCT = WebFeatureService(SOURCES[\"wfsCsTopo_4283\"][\"proxy\"])\n#wfsCT_url = SOURCES[\"wfsCsTopo_4283\"][\"url\"]\nExplanation: OGC W*S endpoints\nEnd of explanation\npdfs = get_pdf_dict(\"data-dictionaries.csv\")\norg_dict = get_org_dict(\"organisations.csv\")\ngroup_dict = get_group_dict(wmsP)\nExplanation: Additional Lookups\nEnd of explanation\norgs = upsert_orgs(org_dict, ckan, debug=False)\ngroups = upsert_groups(group_dict, ckan, debug=False)\nExplanation: Create Organisations and Groups\nThe next step will create or update CKAN organisations from organisations.csv, and CKAN groups from WMS topics.\nEnd of explanation\nl_wmsP = get_layer_dict(wmsP, wmsP_url, ckan, orgs, groups, pdfs, res_format=\"WMS\", debug=False)\nl_wmsCC = get_layer_dict(wmsCC, wmsCC_url, ckan, orgs, groups, pdfs, res_format=\"WMS\", debug=False)\nl_wmsCM = get_layer_dict(wmsCM, wmsCM_url, ckan, orgs, groups, pdfs, res_format=\"WMS\", debug=False)\nl_wfsP = get_layer_dict(wfsP, wfsP_url, ckan, orgs, groups, pdfs, res_format=\"WFS\", debug=False)\nl_wfsCA = get_layer_dict(wfsCA, wfsCA_url, ckan, orgs, groups, pdfs, res_format=\"WFS\", debug=False)\nl_wfsCC = get_layer_dict(wfsCC, wfsCC_url, ckan, orgs, groups, pdfs, res_format=\"WFS\", debug=False)\nExplanation: Prepare data\nThe following step will prepare a dictionary of dataset metadata, ready to be inserted into CKAN. \nIt parses the WMS endpoint and looks up dictionaries organisations, groups, and pdf_dict.\nThis step runs very quickly, as it only handles dictionaries of WMS layers, organisations and groups (both: name and id) and PDFs (name, id, url). There are no API calls to either CKAN or the WMS involved.\nEnd of explanation\n# Delete all datasets with old SLIP layer id name slug\nkill_list = [n for n in ckan.action.package_list() if re.match(r\"(.)*-[0-9][0-9][0-9]$\", n)]\n#killed = [ckan.action.package_delete(id=n) for n in kill_list]\nprint(\"Killed {0} obsolete datasets\".format(len(kill_list)))\nExplanation: Delete old datasets\nNote: With great power comes great responsibility. Execute the next chunk with care and on your own risk.\nEnd of explanation\np_wmsP = upsert_datasets(l_wmsP, ckan, overwrite_metadata=True, drop_existing_resources=True)\nprint(\"{0} datasets created or updated from {1} Public WMS layers\".format(len(p_wmsP), len(wmsP.contents)))\nExplanation: Update datasets in CKAN\nFirst pass: add public WMS layer, overwrite metadata if dataset exists and drop any existing resources.\nEnd of explanation\np_wfs = upsert_datasets(l_wfsP, ckan, overwrite_metadata=False, drop_existing_resources=False)\nprint(\"{0} datasets created or updated from {1} public WFS layers\".format(len(p_wfs), len(wfsP.contents)))\np_wmsCC = upsert_datasets(l_wmsCC, ckan, overwrite_metadata=False, drop_existing_resources=False, debug=False)\nprint(\"{0} datasets created or updated from {1} Cadastre WMS layers\".format(len(p_wmsCC), len(wmsCC.contents)))\np_wfsCC = upsert_datasets(l_wfsCC, ckan, overwrite_metadata=False, drop_existing_resources=False)\nprint(\"{0} datasets created or updated from {1} Cadastre WFS layers\".format(len(p_wfsCC), len(wfsCC.contents)))\np_wfsCA = upsert_datasets(l_wfsCA, ckan, overwrite_metadata=False, drop_existing_resources=False)\nprint(\"{0} datasets created or updated from {1} Cadastre Admin WFS layers\".format(len(p_wfsCA), len(wfsCA.contents)))\nExplanation: Second pass: add public WFS, but retain metadata and resources of existing datasets. Repeat this mode for remaining sources.\nEnd of explanation"}}},{"rowIdx":2143,"cells":{"Unnamed: 0":{"kind":"number","value":2143,"string":"2,143"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Sentiment Analysis with an RNN\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.\nThe architecture for this network is shown below.\n\nHere, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.\nFrom the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.\nWe don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.\nStep1: Data preprocessing\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.\nYou can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \\n. To deal with those, I'm going to split the text into each review using \\n as the delimiter. Then I can combined all the reviews back together into one big string.\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.\nStep2: Encoding the words\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\nExercise\nStep3: Encoding the labels\nOur labels are \"positive\" or \"negative\". To use these labels in our network, we need to convert them to 0 and 1.\nExercise\nStep4: If you built labels correctly, you should see the next output.\nStep5: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.\nExercise\nStep6: Exercise\nStep7: If you build features correctly, it should look like that cell output below.\nStep8: Training, Validation, Test\nWith our data in nice shape, we'll split it into training, validation, and test sets.\nExercise\nStep9: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like\nStep10: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.\nExercise\nStep11: Embedding\nNow we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.\nExercise\nStep12: LSTM cell\n\nNext, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.\nTo create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation\nStep13: RNN forward pass\n\nNow we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\nAbove I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.\nExercise\nStep14: Output\nWe only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[\nStep15: Validation accuracy\nHere we can add a few nodes to calculate the accuracy which we'll use in the validation pass.\nStep16: Batching\nThis is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].\nStep17: Training\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.\nStep18: Testing"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nimport tensorflow as tf\nwith open('../sentiment-network/reviews.txt', 'r') as f:\n reviews = f.read()\nwith open('../sentiment-network/labels.txt', 'r') as f:\n labels = f.read()\nreviews[:2000]\nExplanation: Sentiment Analysis with an RNN\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.\nThe architecture for this network is shown below.\n\nHere, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.\nFrom the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.\nWe don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.\nEnd of explanation\nfrom string import punctuation\nall_text = ''.join([c for c in reviews if c not in punctuation])\nreviews = all_text.split('\\n')\nall_text = ' '.join(reviews)\nwords = all_text.split()\nall_text[:2000]\nwords[:100]\nExplanation: Data preprocessing\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.\nYou can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \\n. To deal with those, I'm going to split the text into each review using \\n as the delimiter. Then I can combined all the reviews back together into one big string.\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.\nEnd of explanation\n# Create your dictionary that maps vocab words to integers here\nfrom collections import Counter\ncounts = Counter(words)\nvocab = sorted(counts, key=counts.get, reverse=True)\nvocab_to_int = {word: i for i, word in enumerate(vocab, 1)}\n# Convert the reviews to integers, same shape as reviews list, but with integers\nreviews_ints = []\nfor review in reviews:\n reviews_ints.append([vocab_to_int[word] for word in review.split()])\nExplanation: Encoding the words\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\nExercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.\nAlso, convert the reviews to integers and store the reviews in a new list called reviews_ints.\nEnd of explanation\n# Convert labels to 1s and 0s for 'positive' and 'negative'\nlabels = labels.split('\\n')\nlabels = np.array([1 if label == 'positive' else 0 for label in labels])\nExplanation: Encoding the labels\nOur labels are \"positive\" or \"negative\". To use these labels in our network, we need to convert them to 0 and 1.\nExercise: Convert labels from positive and negative to 1 and 0, respectively.\nEnd of explanation\nfrom collections import Counter\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))\nprint(review_lens[10])\nExplanation: If you built labels correctly, you should see the next output.\nEnd of explanation\n# Filter out that review with 0 length\nreviews_ints = [each for each in reviews_ints if len(each) > 0]\nExplanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.\nExercise: First, remove the review with zero length from the reviews_ints list.\nEnd of explanation\nseq_len = 200\nfeatures = np.zeros((len(reviews), seq_len), dtype=int)\nfor i, row in enumerate(reviews_ints):\n features[i, -len(row):] = np.array(row)[:seq_len]\nExplanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector.\nThis isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.\nEnd of explanation\nfeatures[:10,:100]\nExplanation: If you build features correctly, it should look like that cell output below.\nEnd of explanation\nsplit_frac = 0.8\nsplit_idx = int(len(features) * split_frac)\ntrain_x, val_x = features[:split_idx], features[split_idx:]\ntrain_y, val_y = labels[:split_idx], labels[split_idx:]\ntest_idx = int(len(val_x) * 0.5)\nval_x, test_x = val_x[:test_idx], val_x[test_idx:]\nval_y, test_y = val_y[:test_idx], val_y[test_idx:]\nprint(\"\\t\\t\\tFeature Shapes:\")\nprint(\"Train set: \\t\\t{}\".format(train_x.shape), \n \"\\nValidation set: \\t{}\".format(val_x.shape),\n \"\\nTest set: \\t\\t{}\".format(test_x.shape))\nExplanation: Training, Validation, Test\nWith our data in nice shape, we'll split it into training, validation, and test sets.\nExercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.\nEnd of explanation\nlstm_size = 256\nlstm_layers = 1\nbatch_size = 500\nlearning_rate = 0.001\nExplanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:\nFeature Shapes:\nTrain set: (20000, 200) \nValidation set: (2500, 200) \nTest set: (2500, 200)\nBuild the graph\nHere, we'll build the graph. First up, defining the hyperparameters.\nlstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.\nlstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.\nbatch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.\nlearning_rate: Learning rate\nEnd of explanation\nn_words = len(vocab_to_int)\n# Create the graph object\ngraph = tf.Graph()\n# Add nodes to the graph\nwith graph.as_default():\n inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')\n labels_ = tf.placeholder(tf.int32, [None, None], name='labels')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\nExplanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.\nExercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.\nEnd of explanation\n# Size of the embedding vectors (number of units in the embedding layer)\nembed_size = 300 \nwith graph.as_default():\n embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\nExplanation: Embedding\nNow we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.\nExercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200].\nEnd of explanation\nwith graph.as_default():\n # Your basic LSTM cell\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n \n # Add dropout to the cell\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n \n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\n \n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)\nExplanation: LSTM cell\n\nNext, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.\nTo create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:\ntf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;)\nyou can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like \nlstm = tf.contrib.rnn.BasicLSTMCell(num_units)\nto create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like\ndrop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\nMost of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:\ncell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\nHere, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.\nSo the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell.\nExercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.\nHere is a tutorial on building RNNs that will help you out.\nEnd of explanation\nwith graph.as_default():\n outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)\nExplanation: RNN forward pass\n\nNow we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\nAbove I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.\nExercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.\nEnd of explanation\nwith graph.as_default():\n predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)\n cost = tf.losses.mean_squared_error(labels_, predictions)\n \n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\nExplanation: Output\nWe only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_.\nEnd of explanation\nwith graph.as_default():\n correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\nExplanation: Validation accuracy\nHere we can add a few nodes to calculate the accuracy which we'll use in the validation pass.\nEnd of explanation\ndef get_batches(x, y, batch_size=100):\n \n n_batches = len(x)//batch_size\n x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]\n for ii in range(0, len(x), batch_size):\n yield x[ii:ii+batch_size], y[ii:ii+batch_size]\nExplanation: Batching\nThis is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].\nEnd of explanation\nepochs = 10\nwith graph.as_default():\n saver = tf.train.Saver()\nwith tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n iteration = 1\n for e in range(epochs):\n state = sess.run(initial_state)\n \n for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 0.5,\n initial_state: state}\n loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)\n \n if iteration%5==0:\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Train loss: {:.3f}\".format(loss))\n if iteration%25==0:\n val_acc = []\n val_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for x, y in get_batches(val_x, val_y, batch_size):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: val_state}\n batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)\n val_acc.append(batch_acc)\n print(\"Val acc: {:.3f}\".format(np.mean(val_acc)))\n iteration +=1\n saver.save(sess, \"checkpoints/sentiment.ckpt\")\nExplanation: Training\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.\nEnd of explanation\ntest_acc = []\nwith tf.Session(graph=graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n test_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: test_state}\n batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)\n test_acc.append(batch_acc)\n print(\"Test accuracy: {:.3f}\".format(np.mean(test_acc)))\nExplanation: Testing\nEnd of explanation"}}},{"rowIdx":2144,"cells":{"Unnamed: 0":{"kind":"number","value":2144,"string":"2,144"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Homework assignment #3\nThese problem sets focus on using the Beautiful Soup library to scrape web pages.\nProblem Set #1\nStep1: Now, in the cell below, use Beautiful Soup to write an expression that evaluates to the number of &lt;h3&gt; tags contained in widgets2016.html.\nStep2: Now, in the cell below, write an expression or series of statements that displays the telephone number beneath the \"Widget Catalog\" header.\nStep3: In the cell below, use Beautiful Soup to write some code that prints the names of all the widgets on the page. After your code has executed, widget_names should evaluate to a list that looks like this (though not necessarily in this order)\nStep4: Problem set #2\nStep5: In the cell below, duplicate your code from the previous question. Modify the code to ensure that the values for price and quantity in each dictionary are floating-point numbers and integers, respectively. I.e., after executing the cell, your code should display something like this\nStep6: Great! I hope you're having fun. In the cell below, write an expression or series of statements that uses the widgets list created in the cell above to calculate the total number of widgets that the factory has in its warehouse.\nExpected output\nStep7: In the cell below, write some Python code that prints the names of widgets whose price is above $9.30.\nExpected output\nStep9: Problem set #3\nStep10: If our task was to create a dictionary that maps the name of the cheese to the description that follows in the &lt;p&gt; tag directly afterward, we'd be out of luck. Fortunately, Beautiful Soup has a .find_next_sibling() method, which allows us to search for the next tag that is a sibling of the tag you're calling it on (i.e., the two tags share a parent), that also matches particular criteria. So, for example, to accomplish the task outlined above\nStep11: With that knowledge in mind, let's go back to our widgets. In the cell below, write code that uses Beautiful Soup, and in particular the .find_next_sibling() method, to print the part numbers of the widgets that are in the table just beneath the header \"Hallowed Widgets.\"\nExpected output\nStep12: Okay, now, the final task. If you can accomplish this, you are truly an expert web scraper. I'll have little web scraper certificates made up and I'll give you one, if you manage to do this thing. And I know you can do it!\nIn the cell below, I've created a variable category_counts and assigned to it an empty dictionary. Write code to populate this dictionary so that its keys are \"categories\" of widgets (e.g., the contents of the &lt;h3&gt; tags on the page"},"code_prompt":{"kind":"string","value":"Python Code:\n!pip3 install bs4\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nhtml_str = urlopen(\"http://static.decontextualize.com/widgets2016.html\").read()\ndocument = BeautifulSoup(html_str, \"html.parser\")\nExplanation: Homework assignment #3\nThese problem sets focus on using the Beautiful Soup library to scrape web pages.\nProblem Set #1: Basic scraping\nI've made a web page for you to scrape. It's available here. The page concerns the catalog of a famous widget company. You'll be answering several questions about this web page. In the cell below, I've written some code so that you end up with a variable called html_str that contains the HTML source code of the page, and a variable document that stores a Beautiful Soup object.\nEnd of explanation\nh3_tags = document.find_all('h3')\nprint(\"There is\", len(h3_tags), \"“h3” tags in widgets2016.html.\")\nExplanation: Now, in the cell below, use Beautiful Soup to write an expression that evaluates to the number of &lt;h3&gt; tags contained in widgets2016.html.\nEnd of explanation\ntel = document.find('a', {'class': 'tel'})\nprint(\"The telephone number is\", tel.string)\nExplanation: Now, in the cell below, write an expression or series of statements that displays the telephone number beneath the \"Widget Catalog\" header.\nEnd of explanation\nwidget_names = document.find_all('td', {'class': 'wname'})\nfor name in widget_names:\n print(name.string)\nExplanation: In the cell below, use Beautiful Soup to write some code that prints the names of all the widgets on the page. After your code has executed, widget_names should evaluate to a list that looks like this (though not necessarily in this order):\nSkinner Widget\nWidget For Furtiveness\nWidget For Strawman\nJittery Widget\nSilver Widget\nDivided Widget\nManicurist Widget\nInfinite Widget\nYellow-Tipped Widget\nUnshakable Widget\nSelf-Knowledge Widget\nWidget For Cinema\nEnd of explanation\nwidgets = []\n# your code here\nwidget_infos = document.find_all('tr', {'class': 'winfo'})\nfor info in widget_infos:\n partno = info.find('td', {'class': 'partno'})\n price = info.find('td', {'class': 'price'})\n quantity = info.find('td', {'class': 'quantity'})\n wname = info.find('td', {'class': 'wname'})\n widgets.append({'partno': partno.string, 'price': price.string, 'quantity': quantity.string, 'wname': wname.string})\n# end your code\nwidgets\nExplanation: Problem set #2: Widget dictionaries\nFor this problem set, we'll continue to use the HTML page from the previous problem set. In the cell below, I've made an empty list and assigned it to a variable called widgets. Write code that populates this list with dictionaries, one dictionary per widget in the source file. The keys of each dictionary should be partno, wname, price, and quantity, and the value for each of the keys should be the value for the corresponding column for each row. After executing the cell, your list should look something like this:\n[{'partno': 'C1-9476',\n 'price': '$2.70',\n 'quantity': u'512',\n 'wname': 'Skinner Widget'},\n {'partno': 'JDJ-32/V',\n 'price': '$9.36',\n 'quantity': '967',\n 'wname': u'Widget For Furtiveness'},\n ...several items omitted...\n {'partno': '5B-941/F',\n 'price': '$13.26',\n 'quantity': '919',\n 'wname': 'Widget For Cinema'}]\nAnd this expression:\nwidgets[5]['partno']\n... should evaluate to:\nLH-74/O\nEnd of explanation\nwidgets = []\n# your code here\nwidget_infos = document.find_all('tr', {'class': 'winfo'})\nfor info in widget_infos:\n partno = info.find('td', {'class': 'partno'})\n price = info.find('td', {'class': 'price'})\n quantity = info.find('td', {'class': 'quantity'})\n wname = info.find('td', {'class': 'wname'})\n widgets.append({'partno': partno.string, 'price': float(price.string[1:]), 'quantity': int(quantity.string), 'wname': wname.string})\n# end your code\nwidgets\nExplanation: In the cell below, duplicate your code from the previous question. Modify the code to ensure that the values for price and quantity in each dictionary are floating-point numbers and integers, respectively. I.e., after executing the cell, your code should display something like this:\n[{'partno': 'C1-9476',\n 'price': 2.7,\n 'quantity': 512,\n 'widgetname': 'Skinner Widget'},\n {'partno': 'JDJ-32/V',\n 'price': 9.36,\n 'quantity': 967,\n 'widgetname': 'Widget For Furtiveness'},\n ... some items omitted ...\n {'partno': '5B-941/F',\n 'price': 13.26,\n 'quantity': 919,\n 'widgetname': 'Widget For Cinema'}]\n(Hint: Use the float() and int() functions. You may need to use string slices to convert the price field to a floating-point number.)\nEnd of explanation\ntotal_nb_widgets = 0\nfor widget in widgets:\n total_nb_widgets += widget['quantity']\nprint(total_nb_widgets)\nExplanation: Great! I hope you're having fun. In the cell below, write an expression or series of statements that uses the widgets list created in the cell above to calculate the total number of widgets that the factory has in its warehouse.\nExpected output: 7928\nEnd of explanation\nfor widget in widgets:\n if widget['price'] > 9.30:\n print(widget['wname'])\nExplanation: In the cell below, write some Python code that prints the names of widgets whose price is above $9.30.\nExpected output:\nWidget For Furtiveness\nJittery Widget\nSilver Widget\nInfinite Widget\nWidget For Cinema\nEnd of explanation\nexample_html = \n

Camembert

\n

A soft cheese made in the Camembert region of France.

\n

Cheddar

\n

A yellow cheese made in the Cheddar region of... France, probably, idk whatevs.

\nExplanation: Problem set #3: Sibling rivalries\nIn the following problem set, you will yet again be working with the data in widgets2016.html. In order to accomplish the tasks in this problem set, you'll need to learn about Beautiful Soup's .find_next_sibling() method. Here's some information about that method, cribbed from the notes:\nOften, the tags we're looking for don't have a distinguishing characteristic, like a class attribute, that allows us to find them using .find() and .find_all(), and the tags also aren't in a parent-child relationship. This can be tricky! For example, take the following HTML snippet, (which I've assigned to a string called example_html):\nEnd of explanation\nexample_doc = BeautifulSoup(example_html, \"html.parser\")\ncheese_dict = {}\nfor h2_tag in example_doc.find_all('h2'):\n cheese_name = h2_tag.string\n cheese_desc_tag = h2_tag.find_next_sibling('p')\n cheese_dict[cheese_name] = cheese_desc_tag.string\ncheese_dict\nExplanation: If our task was to create a dictionary that maps the name of the cheese to the description that follows in the &lt;p&gt; tag directly afterward, we'd be out of luck. Fortunately, Beautiful Soup has a .find_next_sibling() method, which allows us to search for the next tag that is a sibling of the tag you're calling it on (i.e., the two tags share a parent), that also matches particular criteria. So, for example, to accomplish the task outlined above:\nEnd of explanation\nhallowed_header = document.find('h3', text='Hallowed widgets')\nsibling_table = hallowed_header.find_next_sibling()\nfor part in sibling_table.find_all('td', {'class': 'partno'}):\n print(part.string)\nExplanation: With that knowledge in mind, let's go back to our widgets. In the cell below, write code that uses Beautiful Soup, and in particular the .find_next_sibling() method, to print the part numbers of the widgets that are in the table just beneath the header \"Hallowed Widgets.\"\nExpected output:\nMZ-556/B\nQV-730\nT1-9731\n5B-941/F\nEnd of explanation\ncategory_counts = {}\n# your code here\ncategories = document.find_all('h3')\nfor category in categories:\n table = category.find_next_sibling('table')\n widgets = table.select('td.wname')\n category_counts[category.string] = len(widgets)\n# end your code\ncategory_counts\nExplanation: Okay, now, the final task. If you can accomplish this, you are truly an expert web scraper. I'll have little web scraper certificates made up and I'll give you one, if you manage to do this thing. And I know you can do it!\nIn the cell below, I've created a variable category_counts and assigned to it an empty dictionary. Write code to populate this dictionary so that its keys are \"categories\" of widgets (e.g., the contents of the &lt;h3&gt; tags on the page: \"Forensic Widgets\", \"Mood widgets\", \"Hallowed Widgets\") and the value for each key is the number of widgets that occur in that category. I.e., after your code has been executed, the dictionary category_counts should look like this:\n{'Forensic Widgets': 3,\n 'Hallowed widgets': 4,\n 'Mood widgets': 2,\n 'Wondrous widgets': 3}\nEnd of explanation"}}},{"rowIdx":2145,"cells":{"Unnamed: 0":{"kind":"number","value":2145,"string":"2,145"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Pandana demo\nSam Maurer, July 2020\nThis notebook demonstrates the main features of the Pandana library, a Python package for network analysis that uses contraction hierarchies to calculate super-fast travel accessibility metrics and shortest paths.\nSee full documentation here\nStep1: Suppress scientific notation in the output.\nStep2: \n1. Loading data\nLoad street networks directly from Open Street Map\nThis requires installing a Pandana extension called OSMnet.\n- conda install osmnet or pip install osmnet\nYou can use http\nStep3: pandana.loaders.osm.pdna_network_from_bbox()\nStep4: What does the network look like?\nEdges and nodes are visible as DataFrames.\nStep5: Saving and reloading networks\nYou can't directly save a Pandana network object, but you can easily recreate it from the nodes and edges.\n- pandana.Network()\nStep6: \n2. Shortest paths\nThis functionality was added in Pandana v0.5.\nLoad some restaurant locations\nHere we'll load the locations of restaurants listed on Open Street Map (using the same OSMnet extension as above), and then calculate some shortest paths between them.\n- pandana.loaders.osm.node_query()\nStep7: Choose two at random\nStep8: Calculate the shortest route between them\nFirst, identify the nearest node to each restaurant.\n- network.get_node_ids()\nStep9: Then get the routing between the nodes.\n- network.shortest_path()\n- network.shortest_path_length()\n- network.shortest_path_lengths()\nStep10: This network's distance units are meters.\nCalculate many shortest paths at once\nPandana can generate several million shortest paths in less than a minute.\nStep11: Now we have the distance from each restaurant to each other restaurant.\nStep12: \n3. Proximity\nFind the closest restaurants to each node\nTo do a network-wide calculation like this, we first need to formally add the restaurants into the network.\n- network.set_pois()\nStep13: (The maxdist and maxitems parameters are the maximum distance and item counts you anticipate using in proximity searches, so that Pandana can optimize the caching.)\nNow we can run the query.\n - network.nearest_pois()\nStep14: These are the distances (in meters) and IDs of the three closest restaurants to each network node.\n\n4. Accessibility\nHow many restaurants are within 500 meters of each node?\nPandana calls this kind of calculation an aggregation. It requires passing a list of network nodes and associated values. \nIn this case, the \"value\" is just presence of a restaurant, but it could also be characteristics like square footage of a building or income of a household.\nnetwork.get_node_ids()\nnetwork.set()\nStep15: Now we can run the query.\nnetwork.aggregate()\nStep16: Note that you can also calculate means, sums, percentiles, and other metrics, as well as applying linear or exponential \"decay\" to more distant values.\n\n5. Visualization\nPandana's built-in plot function uses an older Matplotlib extension called Basemap that's now difficult to install. So here we'll just use Matplotlib directly.\nMapping restaurant accessibility"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy as np\nimport pandas as pd\nimport pandana\nprint(pandana.__version__)\nExplanation: Pandana demo\nSam Maurer, July 2020\nThis notebook demonstrates the main features of the Pandana library, a Python package for network analysis that uses contraction hierarchies to calculate super-fast travel accessibility metrics and shortest paths.\nSee full documentation here: http://udst.github.io/pandana/\nSections\n1. Loading data\n2. Shortest paths\n3. Proximity\n4. Accessibility\n5. Visualization\nEnd of explanation\npd.options.display.float_format = '{:.2f}'.format\nExplanation: Suppress scientific notation in the output.\nEnd of explanation\nfrom pandana.loaders import osm\nimport warnings\nwarnings.filterwarnings('ignore')\nExplanation: \n1. Loading data\nLoad street networks directly from Open Street Map\nThis requires installing a Pandana extension called OSMnet.\n- conda install osmnet or pip install osmnet\nYou can use http://boundingbox.klokantech.com/ to get the coordinates of bounding boxes.\nEnd of explanation\nnetwork = osm.pdna_network_from_bbox(37.698, -122.517, 37.819, -122.354) # San Francisco, CA\nExplanation: pandana.loaders.osm.pdna_network_from_bbox()\nEnd of explanation\nnetwork.nodes_df.head()\nnetwork.edges_df.head()\nExplanation: What does the network look like?\nEdges and nodes are visible as DataFrames.\nEnd of explanation\nnetwork.nodes_df.to_csv('nodes.csv')\nnetwork.edges_df.to_csv('edges.csv')\nnodes = pd.read_csv('nodes.csv', index_col=0)\nedges = pd.read_csv('edges.csv', index_col=[0,1])\nnetwork = pandana.Network(nodes['x'], nodes['y'], \n edges['from'], edges['to'], edges[['distance']])\nExplanation: Saving and reloading networks\nYou can't directly save a Pandana network object, but you can easily recreate it from the nodes and edges.\n- pandana.Network()\nEnd of explanation\nrestaurants = osm.node_query(\n 37.698, -122.517, 37.819, -122.354, tags='\"amenity\"=\"restaurant\"')\nExplanation: \n2. Shortest paths\nThis functionality was added in Pandana v0.5.\nLoad some restaurant locations\nHere we'll load the locations of restaurants listed on Open Street Map (using the same OSMnet extension as above), and then calculate some shortest paths between them.\n- pandana.loaders.osm.node_query()\nEnd of explanation\nres = restaurants.sample(2)\nres\nExplanation: Choose two at random:\nEnd of explanation\nnodes = network.get_node_ids(res.lon, res.lat).values\nnodes\nExplanation: Calculate the shortest route between them\nFirst, identify the nearest node to each restaurant.\n- network.get_node_ids()\nEnd of explanation\nnetwork.shortest_path(nodes[0], nodes[1])\nnetwork.shortest_path_length(nodes[0], nodes[1])\nExplanation: Then get the routing between the nodes.\n- network.shortest_path()\n- network.shortest_path_length()\n- network.shortest_path_lengths()\nEnd of explanation\nrestaurant_nodes = network.get_node_ids(restaurants.lon, restaurants.lat).values\norigs = [o for o in restaurant_nodes for d in restaurant_nodes]\ndests = [d for o in restaurant_nodes for d in restaurant_nodes]\n%%time\ndistances = network.shortest_path_lengths(origs, dests)\nExplanation: This network's distance units are meters.\nCalculate many shortest paths at once\nPandana can generate several million shortest paths in less than a minute.\nEnd of explanation\npd.Series(distances).describe()\nExplanation: Now we have the distance from each restaurant to each other restaurant.\nEnd of explanation\nnetwork.set_pois(category = 'restaurants',\n maxdist = 1000,\n maxitems = 3,\n x_col = restaurants.lon, \n y_col = restaurants.lat)\nExplanation: \n3. Proximity\nFind the closest restaurants to each node\nTo do a network-wide calculation like this, we first need to formally add the restaurants into the network.\n- network.set_pois()\nEnd of explanation\nresults = network.nearest_pois(distance = 1000,\n category = 'restaurants',\n num_pois = 3,\n include_poi_ids = True)\nresults.head()\nExplanation: (The maxdist and maxitems parameters are the maximum distance and item counts you anticipate using in proximity searches, so that Pandana can optimize the caching.)\nNow we can run the query.\n - network.nearest_pois()\nEnd of explanation\nrestaurant_nodes = network.get_node_ids(restaurants.lon, restaurants.lat)\nnetwork.set(restaurant_nodes, \n name = 'restaurants')\nExplanation: These are the distances (in meters) and IDs of the three closest restaurants to each network node.\n\n4. Accessibility\nHow many restaurants are within 500 meters of each node?\nPandana calls this kind of calculation an aggregation. It requires passing a list of network nodes and associated values. \nIn this case, the \"value\" is just presence of a restaurant, but it could also be characteristics like square footage of a building or income of a household.\nnetwork.get_node_ids()\nnetwork.set()\nEnd of explanation\naccessibility = network.aggregate(distance = 500,\n type = 'count',\n name = 'restaurants')\naccessibility.describe()\nExplanation: Now we can run the query.\nnetwork.aggregate()\nEnd of explanation\nimport matplotlib\nfrom matplotlib import pyplot as plt\nprint(matplotlib.__version__)\nfig, ax = plt.subplots(figsize=(10,8))\nplt.title('San Francisco: Restaurants within 500m')\nplt.scatter(network.nodes_df.x, network.nodes_df.y, \n c=accessibility, s=1, cmap='YlOrRd', \n norm=matplotlib.colors.LogNorm())\ncb = plt.colorbar()\nplt.show()\nExplanation: Note that you can also calculate means, sums, percentiles, and other metrics, as well as applying linear or exponential \"decay\" to more distant values.\n\n5. Visualization\nPandana's built-in plot function uses an older Matplotlib extension called Basemap that's now difficult to install. So here we'll just use Matplotlib directly.\nMapping restaurant accessibility\nEnd of explanation"}}},{"rowIdx":2146,"cells":{"Unnamed: 0":{"kind":"number","value":2146,"string":"2,146"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Chapter 7 – Ensemble Learning and Random Forests\nThis notebook contains all the sample code and solutions to the exercises in chapter 7.\n\n \n
\n \n \n Run in Google Colab\n
\nWarning: this is the code for the 1st edition of the book. Please visit https://github.com/ageron/handson-ml2 for the 2nd edition code, with up-to-date notebooks using the latest library versions.\nSetup\nFirst, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:\nEnd of explanation\nheads_proba = 0.51\ncoin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)\ncumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)\nplt.figure(figsize=(8,3.5))\nplt.plot(cumulative_heads_ratio)\nplt.plot([0, 10000], [0.51, 0.51], \"k--\", linewidth=2, label=\"51%\")\nplt.plot([0, 10000], [0.5, 0.5], \"k-\", label=\"50%\")\nplt.xlabel(\"Number of coin tosses\")\nplt.ylabel(\"Heads ratio\")\nplt.legend(loc=\"lower right\")\nplt.axis([0, 10000, 0.42, 0.58])\nsave_fig(\"law_of_large_numbers_plot\")\nplt.show()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import make_moons\nX, y = make_moons(n_samples=500, noise=0.30, random_state=42)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\nExplanation: Voting classifiers\nEnd of explanation\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nlog_clf = LogisticRegression(solver=\"liblinear\", random_state=42)\nrnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)\nsvm_clf = SVC(gamma=\"auto\", random_state=42)\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='hard')\nvoting_clf.fit(X_train, y_train)\nfrom sklearn.metrics import accuracy_score\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\nlog_clf = LogisticRegression(solver=\"liblinear\", random_state=42)\nrnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)\nsvm_clf = SVC(gamma=\"auto\", probability=True, random_state=42)\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='soft')\nvoting_clf.fit(X_train, y_train)\nfrom sklearn.metrics import accuracy_score\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\nExplanation: Warning: In Scikit-Learn 0.20, some hyperparameters (solver, n_estimators, gamma, etc.) start issuing warnings about the fact that their default value will change in Scikit-Learn 0.22. To avoid these warnings and ensure that this notebooks keeps producing the same outputs as in the book, I set the hyperparameters to their old default value. In your own code, you can simply rely on the latest default values instead.\nEnd of explanation\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, y_pred))\ntree_clf = DecisionTreeClassifier(random_state=42)\ntree_clf.fit(X_train, y_train)\ny_pred_tree = tree_clf.predict(X_test)\nprint(accuracy_score(y_test, y_pred_tree))\nfrom matplotlib.colors import ListedColormap\ndef plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)\n if contour:\n custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"yo\", alpha=alpha)\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"bs\", alpha=alpha)\n plt.axis(axes)\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_decision_boundary(tree_clf, X, y)\nplt.title(\"Decision Tree\", fontsize=14)\nplt.subplot(122)\nplot_decision_boundary(bag_clf, X, y)\nplt.title(\"Decision Trees with Bagging\", fontsize=14)\nsave_fig(\"decision_tree_without_and_with_bagging_plot\")\nplt.show()\nExplanation: Bagging ensembles\nEnd of explanation\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(splitter=\"random\", max_leaf_nodes=16, random_state=42),\n n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nfrom sklearn.ensemble import RandomForestClassifier\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1, random_state=42)\nrnd_clf.fit(X_train, y_train)\ny_pred_rf = rnd_clf.predict(X_test)\nnp.sum(y_pred == y_pred_rf) / len(y_pred) # almost identical predictions\nfrom sklearn.datasets import load_iris\niris = load_iris()\nrnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=42)\nrnd_clf.fit(iris[\"data\"], iris[\"target\"])\nfor name, score in zip(iris[\"feature_names\"], rnd_clf.feature_importances_):\n print(name, score)\nrnd_clf.feature_importances_\nplt.figure(figsize=(6, 4))\nfor i in range(15):\n tree_clf = DecisionTreeClassifier(max_leaf_nodes=16, random_state=42 + i)\n indices_with_replacement = np.random.randint(0, len(X_train), len(X_train))\n tree_clf.fit(X[indices_with_replacement], y[indices_with_replacement])\n plot_decision_boundary(tree_clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.02, contour=False)\nplt.show()\nExplanation: Random Forests\nEnd of explanation\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n bootstrap=True, n_jobs=-1, oob_score=True, random_state=40)\nbag_clf.fit(X_train, y_train)\nbag_clf.oob_score_\nbag_clf.oob_decision_function_\nfrom sklearn.metrics import accuracy_score\ny_pred = bag_clf.predict(X_test)\naccuracy_score(y_test, y_pred)\nExplanation: Out-of-Bag evaluation\nEnd of explanation\ntry:\n from sklearn.datasets import fetch_openml\n mnist = fetch_openml('mnist_784', version=1, as_frame=False)\n mnist.target = mnist.target.astype(np.int64)\nexcept ImportError:\n from sklearn.datasets import fetch_mldata\n mnist = fetch_mldata('MNIST original')\nrnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)\nrnd_clf.fit(mnist[\"data\"], mnist[\"target\"])\ndef plot_digit(data):\n image = data.reshape(28, 28)\n plt.imshow(image, cmap = mpl.cm.hot,\n interpolation=\"nearest\")\n plt.axis(\"off\")\nplot_digit(rnd_clf.feature_importances_)\ncbar = plt.colorbar(ticks=[rnd_clf.feature_importances_.min(), rnd_clf.feature_importances_.max()])\ncbar.ax.set_yticklabels(['Not important', 'Very important'])\nsave_fig(\"mnist_feature_importance_plot\")\nplt.show()\nExplanation: Feature importance\nEnd of explanation\nfrom sklearn.ensemble import AdaBoostClassifier\nada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)\nplot_decision_boundary(ada_clf, X, y)\nm = len(X_train)\nplt.figure(figsize=(11, 4))\nfor subplot, learning_rate in ((121, 1), (122, 0.5)):\n sample_weights = np.ones(m)\n plt.subplot(subplot)\n for i in range(5):\n svm_clf = SVC(kernel=\"rbf\", C=0.05, gamma=\"auto\", random_state=42)\n svm_clf.fit(X_train, y_train, sample_weight=sample_weights)\n y_pred = svm_clf.predict(X_train)\n sample_weights[y_pred != y_train] *= (1 + learning_rate)\n plot_decision_boundary(svm_clf, X, y, alpha=0.2)\n plt.title(\"learning_rate = {}\".format(learning_rate), fontsize=16)\n if subplot == 121:\n plt.text(-0.7, -0.65, \"1\", fontsize=14)\n plt.text(-0.6, -0.10, \"2\", fontsize=14)\n plt.text(-0.5, 0.10, \"3\", fontsize=14)\n plt.text(-0.4, 0.55, \"4\", fontsize=14)\n plt.text(-0.3, 0.90, \"5\", fontsize=14)\nsave_fig(\"boosting_plot\")\nplt.show()\nlist(m for m in dir(ada_clf) if not m.startswith(\"_\") and m.endswith(\"_\"))\nExplanation: AdaBoost\nEnd of explanation\nnp.random.seed(42)\nX = np.random.rand(100, 1) - 0.5\ny = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)\nfrom sklearn.tree import DecisionTreeRegressor\ntree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg1.fit(X, y)\ny2 = y - tree_reg1.predict(X)\ntree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg2.fit(X, y2)\ny3 = y2 - tree_reg2.predict(X)\ntree_reg3 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg3.fit(X, y3)\nX_new = np.array([[0.8]])\ny_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))\ny_pred\ndef plot_predictions(regressors, X, y, axes, label=None, style=\"r-\", data_style=\"b.\", data_label=None):\n x1 = np.linspace(axes[0], axes[1], 500)\n y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)\n plt.plot(X[:, 0], y, data_style, label=data_label)\n plt.plot(x1, y_pred, style, linewidth=2, label=label)\n if label or data_label:\n plt.legend(loc=\"upper center\", fontsize=16)\n plt.axis(axes)\nplt.figure(figsize=(11,11))\nplt.subplot(321)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h_1(x_1)$\", style=\"g-\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Residuals and tree predictions\", fontsize=16)\nplt.subplot(322)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1)$\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Ensemble predictions\", fontsize=16)\nplt.subplot(323)\nplot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_2(x_1)$\", style=\"g-\", data_style=\"k+\", data_label=\"Residuals\")\nplt.ylabel(\"$y - h_1(x_1)$\", fontsize=16)\nplt.subplot(324)\nplot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1)$\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.subplot(325)\nplot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_3(x_1)$\", style=\"g-\", data_style=\"k+\")\nplt.ylabel(\"$y - h_1(x_1) - h_2(x_1)$\", fontsize=16)\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.subplot(326)\nplot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$\")\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nsave_fig(\"gradient_boosting_plot\")\nplt.show()\nfrom sklearn.ensemble import GradientBoostingRegressor\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0, random_state=42)\ngbrt.fit(X, y)\ngbrt_slow = GradientBoostingRegressor(max_depth=2, n_estimators=200, learning_rate=0.1, random_state=42)\ngbrt_slow.fit(X, y)\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"Ensemble predictions\")\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt.learning_rate, gbrt.n_estimators), fontsize=14)\nplt.subplot(122)\nplot_predictions([gbrt_slow], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt_slow.learning_rate, gbrt_slow.n_estimators), fontsize=14)\nsave_fig(\"gbrt_learning_rate_plot\")\nplt.show()\nExplanation: Gradient Boosting\nEnd of explanation\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nX_train, X_val, y_train, y_val = train_test_split(X, y, random_state=49)\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)\ngbrt.fit(X_train, y_train)\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in gbrt.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors) + 1\ngbrt_best = GradientBoostingRegressor(max_depth=2,n_estimators=bst_n_estimators, random_state=42)\ngbrt_best.fit(X_train, y_train)\nmin_error = np.min(errors)\nplt.figure(figsize=(11, 4))\nplt.subplot(121)\nplt.plot(errors, \"b.-\")\nplt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], \"k--\")\nplt.plot([0, 120], [min_error, min_error], \"k--\")\nplt.plot(bst_n_estimators, min_error, \"ko\")\nplt.text(bst_n_estimators, min_error*1.2, \"Minimum\", ha=\"center\", fontsize=14)\nplt.axis([0, 120, 0, 0.01])\nplt.xlabel(\"Number of trees\")\nplt.title(\"Validation error\", fontsize=14)\nplt.subplot(122)\nplot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"Best model (%d trees)\" % bst_n_estimators, fontsize=14)\nsave_fig(\"early_stopping_gbrt_plot\")\nplt.show()\ngbrt = GradientBoostingRegressor(max_depth=2, warm_start=True, random_state=42)\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\nprint(gbrt.n_estimators)\nprint(\"Minimum validation MSE:\", min_val_error)\nExplanation: Gradient Boosting with Early stopping\nEnd of explanation\ntry:\n import xgboost\nexcept ImportError as ex:\n print(\"Error: the xgboost library is not installed.\")\n xgboost = None\nif xgboost is not None: # not shown in the book\n xgb_reg = xgboost.XGBRegressor(random_state=42)\n xgb_reg.fit(X_train, y_train)\n y_pred = xgb_reg.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n print(\"Validation MSE:\", val_error)\nif xgboost is not None: # not shown in the book\n xgb_reg.fit(X_train, y_train,\n eval_set=[(X_val, y_val)], early_stopping_rounds=2)\n y_pred = xgb_reg.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n print(\"Validation MSE:\", val_error)\n%timeit xgboost.XGBRegressor().fit(X_train, y_train) if xgboost is not None else None\n%timeit GradientBoostingRegressor().fit(X_train, y_train)\nExplanation: Using XGBoost\nEnd of explanation\nfrom sklearn.model_selection import train_test_split\nX_train_val, X_test, y_train_val, y_test = train_test_split(\n mnist.data, mnist.target, test_size=10000, random_state=42)\nX_train, X_val, y_train, y_val = train_test_split(\n X_train_val, y_train_val, test_size=10000, random_state=42)\nExplanation: Exercise solutions\n1. to 7.\nSee Appendix A.\n8. Voting Classifier\nExercise: Load the MNIST data and split it into a training set, a validation set, and a test set (e.g., use 50,000 instances for training, 10,000 for validation, and 10,000 for testing).\nThe MNIST dataset was loaded earlier.\nEnd of explanation\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.neural_network import MLPClassifier\nrandom_forest_clf = RandomForestClassifier(n_estimators=10, random_state=42)\nextra_trees_clf = ExtraTreesClassifier(n_estimators=10, random_state=42)\nsvm_clf = LinearSVC(random_state=42)\nmlp_clf = MLPClassifier(random_state=42)\nestimators = [random_forest_clf, extra_trees_clf, svm_clf, mlp_clf]\nfor estimator in estimators:\n print(\"Training the\", estimator)\n estimator.fit(X_train, y_train)\n[estimator.score(X_val, y_val) for estimator in estimators]\nExplanation: Exercise: Then train various classifiers, such as a Random Forest classifier, an Extra-Trees classifier, and an SVM.\nEnd of explanation\nfrom sklearn.ensemble import VotingClassifier\nnamed_estimators = [\n (\"random_forest_clf\", random_forest_clf),\n (\"extra_trees_clf\", extra_trees_clf),\n (\"svm_clf\", svm_clf),\n (\"mlp_clf\", mlp_clf),\n]\nvoting_clf = VotingClassifier(named_estimators)\nvoting_clf.fit(X_train, y_train)\nvoting_clf.score(X_val, y_val)\n[estimator.score(X_val, y_val) for estimator in voting_clf.estimators_]\nExplanation: The linear SVM is far outperformed by the other classifiers. However, let's keep it for now since it may improve the voting classifier's performance.\nExercise: Next, try to combine them into an ensemble that outperforms them all on the validation set, using a soft or hard voting classifier.\nEnd of explanation\nvoting_clf.set_params(svm_clf=None)\nExplanation: Let's remove the SVM to see if performance improves. It is possible to remove an estimator by setting it to None using set_params() like this:\nEnd of explanation\nvoting_clf.estimators\nExplanation: This updated the list of estimators:\nEnd of explanation\nvoting_clf.estimators_\nExplanation: However, it did not update the list of trained estimators:\nEnd of explanation\ndel voting_clf.estimators_[2]\nExplanation: So we can either fit the VotingClassifier again, or just remove the SVM from the list of trained estimators:\nEnd of explanation\nvoting_clf.score(X_val, y_val)\nExplanation: Now let's evaluate the VotingClassifier again:\nEnd of explanation\nvoting_clf.voting = \"soft\"\nvoting_clf.score(X_val, y_val)\nExplanation: A bit better! The SVM was hurting performance. Now let's try using a soft voting classifier. We do not actually need to retrain the classifier, we can just set voting to \"soft\":\nEnd of explanation\nvoting_clf.score(X_test, y_test)\n[estimator.score(X_test, y_test) for estimator in voting_clf.estimators_]\nExplanation: That's a significant improvement, and it's much better than each of the individual classifiers.\nOnce you have found one, try it on the test set. How much better does it perform compared to the individual classifiers?\nEnd of explanation\nX_val_predictions = np.empty((len(X_val), len(estimators)), dtype=np.float32)\nfor index, estimator in enumerate(estimators):\n X_val_predictions[:, index] = estimator.predict(X_val)\nX_val_predictions\nrnd_forest_blender = RandomForestClassifier(n_estimators=200, oob_score=True, random_state=42)\nrnd_forest_blender.fit(X_val_predictions, y_val)\nrnd_forest_blender.oob_score_\nExplanation: The voting classifier reduced the error rate from about 4.0% for our best model (the MLPClassifier) to just 3.1%. That's about 22.5% less errors, not bad!\n9. Stacking Ensemble\nExercise: Run the individual classifiers from the previous exercise to make predictions on the validation set, and create a new training set with the resulting predictions: each training instance is a vector containing the set of predictions from all your classifiers for an image, and the target is the image's class. Train a classifier on this new training set.\nEnd of explanation\nX_test_predictions = np.empty((len(X_test), len(estimators)), dtype=np.float32)\nfor index, estimator in enumerate(estimators):\n X_test_predictions[:, index] = estimator.predict(X_test)\ny_pred = rnd_forest_blender.predict(X_test_predictions)\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\nExplanation: You could fine-tune this blender or try other types of blenders (e.g., an MLPClassifier), then select the best one using cross-validation, as always.\nExercise: Congratulations, you have just trained a blender, and together with the classifiers they form a stacking ensemble! Now let's evaluate the ensemble on the test set. For each image in the test set, make predictions with all your classifiers, then feed the predictions to the blender to get the ensemble's predictions. How does it compare to the voting classifier you trained earlier?\nEnd of explanation"}}},{"rowIdx":2147,"cells":{"Unnamed: 0":{"kind":"number","value":2147,"string":"2,147"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Post-processing Examples\nThis notebook provides some examples for using the post-processing features in RESSPyLab.\nAutomatic table generation and calculation of the consistency metric $\\xi_2$ are shown for both the original and updated Voce-Chaboche (UVC) models.\nNote that there is an example for plotting output in each of the calibration examples.\nStep1: Original Voce-Chaboche model\nFirst we will use RESSPyLab to generate a formatted table of parameters including the relative error metric, $\\bar{\\varphi}$.\nThe inputs to this function are\nStep2: Tables can be easily generated following a standard format for several data sets by appending additional entries to the lists of values in material_def and to x_logs_all and data_all.\nNow we will generate the consistency metric, $\\xi_2$.\nThe input arguments are\nStep3: The value of $\\xi_2 = 65$ %, indicating that the two sets of parameters are inconsistent for this data set.\nUpdated Voce-Chaboche model\nThe inputs to generate the tables are the same as for the original model, however the input parameters have to come from optimization using the updated model."},"code_prompt":{"kind":"string","value":"Python Code:\n# First load RESSPyLab and necessary packages\nimport numpy as np\nimport RESSPyLab as rpl\nExplanation: Post-processing Examples\nThis notebook provides some examples for using the post-processing features in RESSPyLab.\nAutomatic table generation and calculation of the consistency metric $\\xi_2$ are shown for both the original and updated Voce-Chaboche (UVC) models.\nNote that there is an example for plotting output in each of the calibration examples.\nEnd of explanation\n# Identify the material\nmaterial_def = {'material_id': ['Example 1'], 'load_protocols': ['1,5']}\n# Set the path to the x log file\nx_log_file_1 = './output/x_log.txt'\nx_logs_all = [x_log_file_1]\n# Load the data\ndata_files_1 = ['example_1.csv']\ndata_1 = rpl.load_data_set(data_files_1)\ndata_all = [data_1]\n# Make the tables\nparam_table, metric_table = rpl.summary_tables_maker_vc(material_def, x_logs_all, data_all)\nExplanation: Original Voce-Chaboche model\nFirst we will use RESSPyLab to generate a formatted table of parameters including the relative error metric, $\\bar{\\varphi}$.\nThe inputs to this function are: \n1. Information about the name of the data set and the load protocols used in the optimization.\n2. The file containing the history of parameters (generated from the optimization).\n3. The data used in the optimization.\nTwo tables are returned (as pandas DataFrames) and are printed to screen in LaTeX format.\nIf you want the tables in some other format it is best to operate on the DataFrames directly (e.g., use to_csv()).\nEnd of explanation\n# Load the base parameters, we want the last entry in the file\nx_base = np.loadtxt(x_log_file_1, delimiter=' ')\nx_base = x_base[-1]\n# Load (or set) the sample parameters\nx_sample = np.array([179750., 318.47, 100.72, 8.00, 11608.17, 145.22, 1026.33, 4.68])\n# Calculate the metric\nconsistency_metric = rpl.vc_consistency_metric(x_base, x_sample, data_1)\nprint consistency_metric\nExplanation: Tables can be easily generated following a standard format for several data sets by appending additional entries to the lists of values in material_def and to x_logs_all and data_all.\nNow we will generate the consistency metric, $\\xi_2$.\nThe input arguments are:\n1. The parameters of the base case.\n2. The parameters of the case that you would like to compare with.\n3. The set of data to compute this metric over.\nThe metric is returned (the raw value, NOT as a percent) directly from this function.\nEnd of explanation\n# Identify the material\nmaterial_def = {'material_id': ['Example 1'], 'load_protocols': ['1']}\n# Set the path to the x log file\nx_log_file_2 = './output/x_log_upd.txt'\nx_logs_all = [x_log_file_2]\n# Load the data\ndata_files_2 = ['example_1.csv']\ndata_2 = rpl.load_data_set(data_files_2)\ndata_all = [data_2]\n# Make the tables\nparam_table, metric_table = rpl.summary_tables_maker_uvc(material_def, x_logs_all, data_all)\nExplanation: The value of $\\xi_2 = 65$ %, indicating that the two sets of parameters are inconsistent for this data set.\nUpdated Voce-Chaboche model\nThe inputs to generate the tables are the same as for the original model, however the input parameters have to come from optimization using the updated model.\nEnd of explanation"}}},{"rowIdx":2148,"cells":{"Unnamed: 0":{"kind":"number","value":2148,"string":"2,148"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Define and Preview Sets\nStep1: Define Metric\nAlso, show values\nStep2: Clip and compare\nWe are going to create a comparison object which contains sets that are proper subsets of the original (we will be dividing the number of samples in half). However, since the Voronoi cells that are implicitly defined and consitute the $\\sigma$-algebra are going to be fundamentally different, we observe that the two densities reflect the differences in geometry. \nOur chosen densities are uniform and centered in the middle of the domain. The integration sample set is copied during the clipping procedure by default, but can be changed by passing copy=False to clip if you prefer the two comparisons are linked.\nStep3: Observe how these are distinctly different objects in memory\nStep4: Density Emulation\nWe will now estimate the densities on the two comparison objects (remember, one is a clipped version of the other, but they share the same integration_sample_set).\nStep5: Clipped\nStep6: Distances\nStep7: Interactive Demonstration of compP.density\nThis will require ipywidgets. It is a minimalistic example of using the density method without the comparison class.\nStep8: Below, we show an example of using the comparison object to get a better picture of the sets defined above, without necessarily needing to compare two measures."},"code_prompt":{"kind":"string","value":"Python Code:\nnum_samples_left = 50\nnum_samples_right = 50\ndelta = 0.5 # width of measure's support per dimension\nL = unit_center_set(2, num_samples_left, delta)\nR = unit_center_set(2, num_samples_right, delta)\nplt.scatter(L._values[:,0], L._values[:,1], c=L._probabilities)\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.show()\nplt.scatter(R._values[:,0], R._values[:,1], c=R._probabilities)\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.show()\nExplanation: Define and Preview Sets\nEnd of explanation\nnum_emulation_samples = 2000 \nmm = compP.compare(L, R, num_emulation_samples) # initialize metric\n# mm.get_left().get_values()\n# mm.get_right().get_values()\nExplanation: Define Metric\nAlso, show values\nEnd of explanation\n# cut both sample sets in half\nmc = mm.clip(num_samples_left//2,num_samples_right//2)\n# mc.get_left().get_values()\n# mc.get_right().get_values()\nExplanation: Clip and compare\nWe are going to create a comparison object which contains sets that are proper subsets of the original (we will be dividing the number of samples in half). However, since the Voronoi cells that are implicitly defined and consitute the $\\sigma$-algebra are going to be fundamentally different, we observe that the two densities reflect the differences in geometry. \nOur chosen densities are uniform and centered in the middle of the domain. The integration sample set is copied during the clipping procedure by default, but can be changed by passing copy=False to clip if you prefer the two comparisons are linked.\nEnd of explanation\nmm, mc\nExplanation: Observe how these are distinctly different objects in memory:\nEnd of explanation\nld1,rd1 = mm.estimate_density()\nI = mc.get_emulated().get_values()\nplt.scatter(I[:,0], I[:,1], c=rd1,s =10, alpha=0.5)\nplt.scatter(R._values[:,0], R._values[:,1], marker='o', s=50, c='k')\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.title(\"Right Density\")\nplt.show()\nplt.scatter(I[:,0], I[:,1], c=ld1, s=10, alpha=0.5)\nplt.scatter(L._values[:,0], L._values[:,1], marker='o', s=50, c='k')\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.title(\"Left Density\")\nplt.show()\nExplanation: Density Emulation\nWe will now estimate the densities on the two comparison objects (remember, one is a clipped version of the other, but they share the same integration_sample_set).\nEnd of explanation\nld2,rd2 = mc.estimate_density()\nplt.scatter(I[:,0], I[:,1], c=rd2,s =10, alpha=0.5)\nplt.scatter(mc.get_right()._values[:,0],\n mc.get_right()._values[:,1], \n marker='o', s=50, c='k')\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.title(\"Right Density\")\nplt.show()\nplt.scatter(I[:,0], I[:,1], c=ld2, s=10, alpha=0.5)\nplt.scatter(mc.get_left()._values[:,0], \n mc.get_left()._values[:,1], \n marker='o', s=50, c='k')\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.title(\"Left Density\")\nplt.show()\nExplanation: Clipped\nEnd of explanation\nfrom scipy.stats import entropy as kl_div\nmm.set_left(unit_center_set(2, 1000, delta/2))\nmm.set_right(unit_center_set(2, 1000, delta))\nprint([mm.value(kl_div),\n mm.value('tv'),\n mm.value('totvar'),\n mm.value('mink', w=0.5, p=1),\n mm.value('norm'),\n mm.value('sqhell'),\n mm.value('hell'),\n mm.value('hellinger')])\nExplanation: Distances\nEnd of explanation\nimport ipywidgets as wd\ndef show_clip(samples=100, delta=0.5):\n np.random.seed(int(121))\n S = unit_center_set(2, samples, delta)\n compP.density(S)\n plt.figure()\n plt.scatter(S._values[:,0], S._values[:,1], \n c=S._density.ravel())\n plt.show()\nwd.interact(show_clip, samples=(20,500), delta=(0.05,1,0.05))\nExplanation: Interactive Demonstration of compP.density\nThis will require ipywidgets. It is a minimalistic example of using the density method without the comparison class.\nEnd of explanation\nimport scipy.stats as sstats\ndef show_clipm(samples=100, delta=0.5):\n np.random.seed(int(121))\n S = unit_center_set(2, samples, delta)\n \n # alternative probabilities\n xprobs = sstats.distributions.norm(0.5, delta).pdf(S._values[:,0])\n yprobs = sstats.distributions.norm(0.5, delta).pdf(S._values[:,1])\n probs = xprobs*yprobs\n S.set_probabilities(probs*S._volumes)\n \n I = mm.get_emulated()\n m = compP.comparison(I,S,None)\n m.estimate_density_left()\n plt.figure()\n plt.scatter(I._values[:,0], I._values[:,1], \n c=S._emulated_density.ravel())\n plt.scatter([0.5], [0.5], marker='x')\n plt.show()\nwd.interact(show_clipm, samples=(20,500), delta=(0.1,1,0.05))\nExplanation: Below, we show an example of using the comparison object to get a better picture of the sets defined above, without necessarily needing to compare two measures.\nEnd of explanation"}}},{"rowIdx":2149,"cells":{"Unnamed: 0":{"kind":"number","value":2149,"string":"2,149"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Using Sylbreak in Jupyter Notebook\nဒီ Jupyter Notebook က GitHub မှာ ကျွန်တော်တင်ပေးထားတဲ့ Sylbreak Python ပရိုဂရမ် https\nStep2: စိတ်ထဲမှာ ပေါ်လာတာကို ကောက်ရေးပြီးတော့ syllable segmentation လုပ်ခိုင်းလိုက်တာပါ။ \nStep3: Typing order\nမြန်မာစာနဲ့ ပတ်သက်တဲ့ NLP (Natural Language Processing) အလုပ် တစ်ခုခု လုပ်ဖို့အတွက် syllable segmentation လုပ်ကြမယ်ဆိုရင် တကယ်တမ်းက မလုပ်ခင်မှာ၊ မြန်မာစာ စာကြောင်းတွေရဲ့ typing order အပါအဝင် တခြား ဖြစ်တတ်တဲ့ အမှားတွေကိုလည်း cleaning လုပ်ရပါတယ်။ အဲဒီလိုမလုပ်ရင် sylbreak က ကျွန်တော် အကြမ်းမျဉ်းသတ်မှတ်ထားတဲ့ မြန်မာစာ syllable unit တွေအဖြစ် မှန်မှန်ကန်ကန် ဖြတ်ပေးနိုင်မှာ မဟုတ်ပါဘူး။ မြန်မာစာ စာကြောင်းတွေထဲမှာ ရှိတတ်တဲ့အမှား တွေက တကယ့်ကို အများကြီးပါ။ တချို့ အမှားတွေက မျက်လုံးနဲ့ကြည့်ယုံနဲ့ မခွဲခြားနိုင်တာမျိုးတွေလည်း ရှိပါတယ်။ ဒီနေရာမှာတော့ အမှားအမျိုးအစားတွေထဲက တစ်မျိုးဖြစ်တဲ့ typing order အမှား တစ်မျိုး၊ နှစ်မျိုးကို ဥပမာအနေနဲ့ရှင်းပြရင်း၊ အဲဒီလိုအခြေအနေမျိုးမှာ ဖြစ်တတ်တဲ့ sylbreak က ထွက်လာမယ့် အမှား output တွေကိုလည်း လေ့လာကြည့်ကြရအောင်။ \nအောက်မှာ သုံးပြထားတဲ့ \"ခန့်\" က \"ခ န ့ ်\" (ခခွေး နငယ် အောက်မြစ် အသတ်) ဆိုတဲ့ အစီအစဉ် အမှားနဲ့ ရိုက်ထားတာဖြစ်ပါတယ်။ အဲဒါကြောင့် sylbreak က ထွက်လာတဲ့အခါမှာ \"ခခွေး\" နဲ့ \"နငယ် အသတ် အောက်မြစ်\" က ကွဲနေတာဖြစ်ပါတယ်။\nStep4: တကယ်တန်း မှန်ကန်တဲ့ \"ခန့်\" ရဲ့ typing order က \"ခ န ် ့\" (ခခွေး နငယ် အသတ် အောက်မြစ်) ပါ။\nအမြင်အားဖြင့်ကတော့ မခွဲနိုင်ပေမဲ့၊ မှန်ကန်တဲ့ typing order နဲ့ ရိုက်ထားရင်တော့ \"ခန့်\" ဆိုပြီး syllable တစ်ခုအနေနဲ့ ရိုက်ထုတ်ပြပေးပါလိမ့်မယ်။\nStep5: နောက်ထပ် typing order အမှားတစ်ခုကို ကြည့်ကြရအောင်။\nStep6: \"ညကြီး အောက်မြစ် အသတ်\" ဆိုတဲ့ မှားနေတဲ့ အစီအစဉ်ကို \"ညကြီး အသတ် အောက်မြစ်\" ဆိုပြီး\nပြောင်းရိုက်ပြီးတော့ sylbreak လုပ်ကြည့်ရင်တော့ အောက်ပါအတိုင်း \"ထ\" နဲ့ \"ည့်\", \"သ\" နဲ့ \"ည့်\" တွေက ကွဲမနေတော့ပဲ မှန်မှန်ကန်ကန်ဖြတ်ပေးပါလိမ့်မယ်။\nStep7: တချို့အမှားတွေကတော့ ဂရုစိုက်ရင် မျက်စိနဲ့ မြင်နိုင်ပါတယ်။\nဥပမာ \"ဥ\" (အက္ခရာ ဥ) နဲ့ \"ဉ\" (ညကလေး) ကိုမှားရိုက်တဲ့ကိစ္စပါ။\nသို့သော် ကျွန်တော်မြန်မာစာကြောင်းတွေအများကြီးကို ကိုင်တွယ်အလုပ်လုပ်တဲ့အခါတိုင်းမှာ ဒီလိုအမှားက အမြဲတမ်းကို ပါတတ်ပါတယ်။\nဖောင့် (font) မှာလည်း မှန်မှန်ကန်ကန်ခွဲထားမယ်ဆိုရင်၊ အမှန်က ညကလေးဆိုရင် အမြီးက ရှည်ပါတယ်။ \nစာရိုက်သူအများစုက သတိမပြုမိတဲ့ အကြောင်းအရင်း တစ်ခုကလည်း တချို့ text editor တွေမှာ \"အက္ခရာ ဥ\" နှင့် ညကလေး \"ဉ\" ကို ကွဲပြားအောင် မပြသပေးနိုင်လို့ပါ။\nStep8: ဝီကီပီးဒီးယားက မှားနေတဲ့ \"ညကလေး\" ကို \"အက္ခရာ ဥ\" နဲ့ပြန်ပြင်ရိုက်ထားတဲ့ စာကြောင်းနဲ့ နောက်တစ်ခေါက် syllable ဖြတ်ထားတာက အောက်ပါအတိုင်းဖြစ်ပါတယ်။ \"ညကလေး\" နဲ့ \"အက္ခရာ ဥ\" အမှားကိစ္စမှာတော့ syllable segmentation ဖြတ်တဲ့အပိုင်းမှာတော့ ထူးထူးခြားခြား အပြောင်းအလဲ မရှိပါဘူး။"},"code_prompt":{"kind":"string","value":"Python Code:\n# Regular Expression Python Library ကို သုံးလို့ရအောင် import လုပ်တာ\nimport re\n# စာလုံးတွေကို အုပ်စုဖွဲ့တာ (သို့) variable declaration လုပ်တာ\n# တကယ်လို့ syllable break လုပ်တဲ့ အခါမှာ မြန်မာစာလုံးချည်းပဲ သပ်သပ် လုပ်ချင်တာဆိုရင် enChar က မလိုပါဘူး\nmyConsonant = \"က-အ\"\nenChar = \"a-zA-Z0-9\"\notherChar = \"ဣဤဥဦဧဩဪဿ၌၍၏၀-၉၊။!-/:-@[-`{-~\\s\"\nssSymbol = '္'\nngaThat = 'င်'\naThat = '်'\n# Regular expression pattern for Myanmar syllable breaking\n# *** a consonant not after a subscript symbol AND \n# a consonant is not followed by a-That character or a subscript symbol\n# မြန်မာစာကို syllable segmentation လုပ်ဖို့အတွက်က ဒီ RE pattern တစ်ခုတည်းနဲ့ အဆင်ပြေတယ်။\nBreakPattern = re.compile(r\"((?n) = 0\n for x in range(1, n):\n result[n,x] = result[n-1-x:n,x].sum() + result[n-1-x,:x].sum()\n return result\ndef pmatrix(Py_ssize_t N):\n Calculate an NxN matrix of the Schilling distribution.\n \n The elements p[n, x] of the resulting matrix are the probabilities that\n the length of the longest head-run in a sequence of n independent tosses\n of a fair coin is exactly x.\n \n It holds that p[n, x] = a[n, x] / 2 ** n = (A[n, x] - A[n, x-1]) / 2 ** n\n \n Note that the probability that the length of the longest run \n (no matter if head or tail) in a sequence of n independent \n tosses of a fair coin is _exactly_ x is p[n-1, x-1].\n \n cdef np.ndarray[np.double_t, ndim=2] result\n cdef Py_ssize_t n,x,j\n cdef double val\n result = np.zeros((N, N), np.double)\n for n in range(N):\n result[n, 0] = 2.0**(-n)\n result[n, n] = 2.0**(-n) #p_n(x=n) = 1/2**n\n # p_n(x>n) = 0\n for x in range(1, n):\n val=0\n for j in range(n-1-x,n):\n val+=2.0**(j-n)*result[j,x]\n for j in range(0, x):\n val += 2.0**(-x-1)*result[n-1-x,j]\n result[n,x] = val\n return result\n \nExplanation: Definition of the algorithms\nAlgorithms according Schilling's paper\nI have implemented these in Cython for the sake of speed\nEnd of explanation\ndef cormap_pval(n, x):\n Cormap P-value algorithm, giving the probability that from\n n coin-tosses the longest continuous sequence of either heads\n or tails is _not_shorter_ than x.\n \n Python version of the original Fortran90 code of Daniel Franke\n dbl_max_exponent=np.finfo(np.double).maxexp-1\n P=np.zeros(dbl_max_exponent)\n if x <=1:\n pval = 1\n elif x>n:\n pval = 0\n elif x>dbl_max_exponent:\n pval = 0\n elif x==n:\n pval = 2.0**(1-x)\n else:\n half_pow_x = 2**(-x)\n P[1:x] = 0\n i_x = 0\n P[i_x]=2*half_pow_x\n for i in range(x+1, n+1):\n im1_x = i_x # == (i-1) % x\n i_x = i % x\n P[i_x] = P[im1_x] + half_pow_x * (1-P[i_x])\n pval = P[i_x]\n return pval\nExplanation: The cormap_pval algorithm from Daniel Franke, EMBL Hamburg\nEnd of explanation\nN=20\n# Calculate the matrix for A using the slow method\nA_slow = np.empty((N,N), np.uint64)\nfor n in range(N):\n for x in range(N):\n A_slow[n,x]=A(n,x)\nA_fast = Amatrix(N)\nprint('The two matrices are the same:',(np.abs(A_slow - A_fast)).sum()==0)\nExplanation: Validation of the Cython-algorithms\nCheck 1\nAmatrix() returns the same values as Schilling's recursive formula for A_n(x)\nEnd of explanation\nN=50\na=amatrix(N)\nA_fast=Amatrix(N)\nA_constructed=np.empty((N,N), np.uint64)\nfor x in range(N):\n A_constructed[:, x]=a[:,:x+1].sum(axis=1)\nprint('The two matrices are the same:',(np.abs(A_fast - A_constructed).sum()==0))\nExplanation: Check 2\n$A_n(x) = \\sum_{j=0}^{x}a_n(j)$\nEnd of explanation\nN=50\np=pmatrix(N)\na=amatrix(N)\np_from_a=np.empty((N,N), np.double)\nfor n in range(N):\n p_from_a[n, :] = a[n, :]/2**n\nprint('The two matrices are the same:',(np.abs(p - p_from_a).sum()==0))\nExplanation: Check 3\n$p_n(x) = 2^{-n}a_n(x)$\nEnd of explanation\np=pmatrix(50)\nfor n in range(1,5):\n print('{} toss(es):'.format(n))\n for x in range(1,n+1):\n print(' p_{}({}) = {}'.format(n,x,p[n-1,x-1]))\nExplanation: Check 4\nWell-known special cases\n1 toss\n2 possible outcomes: H, T\n\n \n \n
Max lengthOutcomesp
121
\n2 tosses\n4 possible outcomes: HH, HT, TH, TT\n\n \n \n \n
Max lengthOutcomesp
120.5
220.5
\n3 tosses\n8 possible outcomes: HHT, HTT, THT, TTT, HHH, HTH, THH, TTH\n\n \n \n \n \n
Max lengthOutcomesp
120.25
240.5
220.25
\n4 tosses\n16 possible outcomes:\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
OutcomeLongest sequence length
HHTH2
HTTH2
THTH1
TTTH3
HHHH4
HTHH2
THHH3
TTHH2
HHTT2
HTTT3
THTT2
TTTT4
HHHT3
HTHT1
THHT2
TTHT2
\n\n \n \n \n \n \n
Max lengthOutcomesp
120.125
280.5
340.25
420.125
\nEnd of explanation\nNmax=200\ntimes = np.empty(Nmax)\nfor i in range(Nmax):\n t0=time.monotonic()\n p=pmatrix(i)\n times[i]=time.monotonic()-t0\nplt.loglog(np.arange(Nmax),times,'o-',label='Execution time')\nplt.xlabel('N')\nplt.ylabel('Execution time of pmatrix(N) (sec)')\nx=np.arange(Nmax)\na,b=np.polyfit(np.log(x[x>100]), np.log(times[x>100]),1)\nplt.loglog(x,np.exp(np.log(x)*a+b),'r-', label='$\\\\propto N^{%.3f}$' % a)\nplt.legend(loc='best')\nExplanation: Estimate the execution time\nEnd of explanation\nN=50\np=pmatrix(N+1)\nbar(left=np.arange(p.shape[1])-0.5,height=p[N,:])\nplt.axis(xmin=0,xmax=14)\nplt.xlabel('Length of the longest head-run in {} tosses'.format(N))\nplt.ylabel('Probability')\nprint('Most probable maximum head-run length:',p[N,:].argmax())\nExplanation: Some visualization\nEnd of explanation\nN=2050\nprint('Calculating {0}x{0} p-matrix...', flush=True)\nt0=time.monotonic()\np=pmatrix(N)\nt=time.monotonic()-t0\nprint('Done in {} seconds'.format(t))\nnp.savez('cointoss_p.npz',p=p)\nExplanation: Large numbers\nCalculate a large p matrix (takes some time, see above) for use of later computations\nEnd of explanation\nplt.figure(figsize=(8,4))\nplt.subplot(1,2,1)\nplt.imshow(p,norm=matplotlib.colors.Normalize(),interpolation='nearest')\nplt.xlabel('x')\nplt.ylabel('n')\nplt.title('Linear colour scale')\nplt.colorbar()\nplt.subplot(1,2,2)\nplt.imshow(p,norm=matplotlib.colors.LogNorm(), interpolation='nearest')\nplt.xlabel('x')\nplt.ylabel('n')\nplt.title('Logarithmic colour scale')\nplt.colorbar()\nExplanation: Visualize the matrix:\nEnd of explanation\ntable =[['n', 'x', 'p (D. Franke)', 'p (A. Wacha)']]\nfor n, x in [(449,137),(449,10),(2039,338),(2039,18),(200,11),(10,2), (1,0), (1,1), (2,0), (2,1), (2,2), (3,0), (3,1), (3,2), (3,3), (4,0), (4,1), (4,2), (4,3), (4,4)]:\n table.append([n,x,cormap_pval(n,x), p[n-1,max(0,x-1):].sum()])\n \ntab=ipy_table.IpyTable(table)\ntab.apply_theme('basic')\ndisplay(tab)\nExplanation: Compare with the algorithm in CorMap\nNote that in CorMap, the p-value is the probability that the longest continuous sequence of either heads or tails in $n$ coin-tosses is not shorter than $x$, while our pmatrix() gives the probability that it is exactly $x$ long. The desired quantity is obtained from the pmatrix() approach as $\\sum_{j \\ge(x-1)} p_n(j)$.\nEnd of explanation"}}},{"rowIdx":2153,"cells":{"Unnamed: 0":{"kind":"number","value":2153,"string":"2,153"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Iteradores\nUna de las cosas más maravillosas de las compus es que podemos repetir un mismo cálculo para muchos valores de forma automática. Ya hemos visto al menos un iterator (iterador), que no es una lista... es otro objeto.\nStep1: Pero range, la verdad no es una lista. Es un iterador y aprender cómo funciona es útil en varios ámbitos.\nIterar sobre listas\nStep2: En este caso, lo primero que hace el iterador es chequear si el objeto del otro lado del in es un iterador. Esto se puede chequear con la función iter, parecida a type.\nStep3: range()\nStep4: Y es así como Python lo trata como si fuera una lista\nStep5: Si una lista fuera a crear un trillón de valores ($10^{12}$), necesitaríamos terabytes de memoria para almacenarlos.\nAlgunos iteradores útiles\nenumerate\nAlgunas veces queremos no solo iterar sobre los valores en una lista, sino también imprimir el índice de ellos.\nStep6: Pero hay una sintaxis más limpia para esto\nStep7: zip\nLa función zip itera sobre dos iterables y produce una tupla\nStep8: Si las listas son de diferente largo, el largo del zip va a estar dado por la lista más corta.\nmap y filter\nUn poco más intenso\nStep9: El iterador filter toma una función y la aplica sobre todos los valores de un iterador devolviendo sólo aquellos valores que \"pasan\" el filtro.\nStep10: iteradores especializados"},"code_prompt":{"kind":"string","value":"Python Code:\nfor i in range(10):\n print(i, end=' ')\nExplanation: Iteradores\nUna de las cosas más maravillosas de las compus es que podemos repetir un mismo cálculo para muchos valores de forma automática. Ya hemos visto al menos un iterator (iterador), que no es una lista... es otro objeto.\nEnd of explanation\nfor value in [2, 4, 6, 8, 10]:\n # do some operation\n print(value + 1, end=' ')\nExplanation: Pero range, la verdad no es una lista. Es un iterador y aprender cómo funciona es útil en varios ámbitos.\nIterar sobre listas\nEnd of explanation\niter([2, 4, 6, 8, 10])\nI = iter([2, 4, 6, 8, 10])\nprint(next(I))\nprint(next(I))\nExplanation: En este caso, lo primero que hace el iterador es chequear si el objeto del otro lado del in es un iterador. Esto se puede chequear con la función iter, parecida a type.\nEnd of explanation\nrange(10)\niter(range(10))\nExplanation: range(): Una lista no siempre es una lista\nrange() como una lista, expone un iterador:\nEnd of explanation\nN = 10 ** 12\nfor i in range(N):\n if i >= 10: break\n print(i, end=', ')\nExplanation: Y es así como Python lo trata como si fuera una lista:\nEnd of explanation\nL = [2, 4, 6, 8, 10]\nfor i in range(len(L)):\n print(i, L[i])\nExplanation: Si una lista fuera a crear un trillón de valores ($10^{12}$), necesitaríamos terabytes de memoria para almacenarlos.\nAlgunos iteradores útiles\nenumerate\nAlgunas veces queremos no solo iterar sobre los valores en una lista, sino también imprimir el índice de ellos.\nEnd of explanation\nfor i, val in enumerate(L):\n print(i, val)\nExplanation: Pero hay una sintaxis más limpia para esto:\nEnd of explanation\nL = [2, 4, 6, 8, 10]\nR = [3, 6, 9, 12, 15]\nfor lval, rval in zip(L, R):\n print(lval, rval)\nExplanation: zip\nLa función zip itera sobre dos iterables y produce una tupla:\nEnd of explanation\n# find the first 10 square numbers\nsquare = lambda x: x ** 2\nfor val in map(square, range(10)):\n print(val, end=' ')\nExplanation: Si las listas son de diferente largo, el largo del zip va a estar dado por la lista más corta.\nmap y filter\nUn poco más intenso: el iterador map toma una función y la aplica sobre todos los valores de un iterador:\nEnd of explanation\n# find values up to 10 for which x % 2 is zero\nis_even = lambda x: x % 2 == 0\nfor val in filter(is_even, range(10)):\n print(val, end=' ')\nExplanation: El iterador filter toma una función y la aplica sobre todos los valores de un iterador devolviendo sólo aquellos valores que \"pasan\" el filtro.\nEnd of explanation\nfrom itertools import permutations\np = permutations(range(3))\nprint(*p)\nfrom itertools import combinations\nc = combinations(range(4), 2)\nprint(*c)\nfrom itertools import product\np = product('ab', range(3))\nprint(*p)\nExplanation: iteradores especializados: itertools\nYa vimos el count de itertools. Este módulo contiene un montón de funciones útiles. Por ejemplo, aqui veremos itertools.permutations, itertools.combinations, itertools.product.\nEnd of explanation"}}},{"rowIdx":2154,"cells":{"Unnamed: 0":{"kind":"number","value":2154,"string":"2,154"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n 신경망 성능 개선\n신경망의 예측 성능 및 수렴 성능을 개선하기 위해서는 다음과 같은 추가적인 고려를 해야 한다.\n오차(목적) 함수 개선\nStep1: 교차 엔트로피 오차 함수 (Cross-Entropy Cost Function)\n이러한 수렴 속도 문제를 해결하는 방법의 하나는 오차 제곱합 형태가 아닌 교차 엔트로피(Cross-Entropy) 형태의 오차함수를 사용하는 것이다.\n$$\n\\begin{eqnarray} \n C = -\\frac{1}{n} \\sum_x \\left[y \\ln z + (1-y) \\ln (1-z) \\right],\n\\end{eqnarray}\n$$\n미분값은 다음과 같다.\n$$\n\\begin{eqnarray}\n \\frac{\\partial C}{\\partial w_j} & = & -\\frac{1}{n} \\sum_x \\left(\n \\frac{y }{z} -\\frac{(1-y)}{1-z} \\right)\n \\frac{\\partial z}{\\partial w_j} \\\n & = & -\\frac{1}{n} \\sum_x \\left( \n \\frac{y}{\\sigma(a)} \n -\\frac{(1-y)}{1-\\sigma(a)} \\right)\\sigma'(a) x_j \\\n & = &\n \\frac{1}{n}\n \\sum_x \\frac{\\sigma'(a) x_j}{\\sigma(a) (1-\\sigma(a))}\n (\\sigma(a)-y) \\\n & = & \\frac{1}{n} \\sum_x x_j(\\sigma(a)-y) \\ \n & = & \\frac{1}{n} \\sum_x (z-y) x_j\\ \\\n\\frac{\\partial C}{\\partial b} &=& \\frac{1}{n} \\sum_x (z-y)\n\\end{eqnarray}\n$$\n이 식에서 보다시피 기울기(gradient)가 예측 오차(prediction error) $z-y$에 비례하기 때문에\n오차가 크면 수렴 속도가 빠르고\n오차가 적으면 속도가 감소하여 발산을 방지한다.\n교차 엔트로피 구현 예\nhttps\nStep6: 과최적화 문제\n신경망 모형은 파라미터의 수가 다른 모형에 비해 많다.\n * (28x28)x(30)x(10) => 24,000\n * (28x28)x(100)x(10) => 80,000\n이렇게 파라미터의 수가 많으면 과최적화 발생 가능성이 증가한다. 즉, 정확도가 나아지지 않거나 나빠져도 오차 함수는 계속 감소하는 현상이 발생한다.\n예\nStep10: Hyper-Tangent Activation and Rectified Linear Unit (ReLu) Activation\n시그모이드 함수 이외에도 하이퍼 탄젠트 및 ReLu 함수를 사용할 수도 있다.\n하이퍼 탄젠트 activation 함수는 음수 값을 가질 수 있으며 시그모이드 activation 함수보다 일반적으로 수렴 속도가 빠르다.\n$$\n\\begin{eqnarray}\n \\tanh(w \\cdot x+b), \n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\n \\tanh(a) \\equiv \\frac{e^a-e^{-a}}{e^a+e^{-a}}.\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray} \n \\sigma(a) = \\frac{1+\\tanh(a/2)}{2},\n\\end{eqnarray}\n$$\nStep11: Rectified Linear Unit (ReLu) Activation 함수는 무한대 크기의 activation 값이 가능하며 가중치총합 $a$가 큰 경우에도 기울기(gradient)가 0 이되며 사라지지 않는다는 장점이 있다.\n$$\n\\begin{eqnarray}\n \\max(0, w \\cdot x+b).\n\\end{eqnarray}\n$$"},"code_prompt":{"kind":"string","value":"Python Code:\nsigmoid = lambda x: 1/(1+np.exp(-x))\nsigmoid_prime = lambda x: sigmoid(x)*(1-sigmoid(x))\nxx = np.linspace(-10, 10, 1000)\nplt.plot(xx, sigmoid(xx));\nplt.plot(xx, sigmoid_prime(xx));\nExplanation: 신경망 성능 개선\n신경망의 예측 성능 및 수렴 성능을 개선하기 위해서는 다음과 같은 추가적인 고려를 해야 한다.\n오차(목적) 함수 개선: cross-entropy cost function\n정규화: regularization\n가중치 초기값: weight initialization\nSoftmax 출력\nActivation 함수 선택: hyper-tangent and ReLu\n기울기와 수렴 속도 문제\n일반적으로 사용하는 잔차 제곱합(sum of square) 형태의 오차 함수는 대부분의 경우에 기울기 값이 0 이므로 (near-zero gradient) 수렴이 느려지는 단점이 있다.\nhttp://neuralnetworksanddeeplearning.com/chap3.html\n$$\n\\begin{eqnarray}\nz = \\sigma (wx+b)\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\n C = \\frac{(y-z)^2}{2},\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray} \n \\frac{\\partial C}{\\partial w} & = & (z-y)\\sigma'(a) x \\\n \\frac{\\partial C}{\\partial b} & = & (z-y)\\sigma'(a)\n\\end{eqnarray}\n$$\nif $x=1$, $y=0$, \n$$\n\\begin{eqnarray} \n \\frac{\\partial C}{\\partial w} & = & a \\sigma'(a) \\\n \\frac{\\partial C}{\\partial b} & = & a \\sigma'(z)\n\\end{eqnarray}\n$$\n$\\sigma'$는 대부분의 경우에 zero.\nEnd of explanation\n%cd /home/dockeruser/neural-networks-and-deep-learning/src\n%ls\nimport mnist_loader\nimport network2\ntraining_data, validation_data, test_data = mnist_loader.load_data_wrapper()\nnet = network2.Network([784, 30, 10], cost=network2.QuadraticCost)\nnet.large_weight_initializer()\n%time result1 = net.SGD(training_data, 10, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)\nnet = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)\nnet.large_weight_initializer()\n%time result2 = net.SGD(training_data, 10, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)\nplt.plot(result1[1], 'bo-', label=\"quadratic cost\")\nplt.plot(result2[1], 'rs-', label=\"cross-entropy cost\")\nplt.legend(loc=0)\nplt.show()\nExplanation: 교차 엔트로피 오차 함수 (Cross-Entropy Cost Function)\n이러한 수렴 속도 문제를 해결하는 방법의 하나는 오차 제곱합 형태가 아닌 교차 엔트로피(Cross-Entropy) 형태의 오차함수를 사용하는 것이다.\n$$\n\\begin{eqnarray} \n C = -\\frac{1}{n} \\sum_x \\left[y \\ln z + (1-y) \\ln (1-z) \\right],\n\\end{eqnarray}\n$$\n미분값은 다음과 같다.\n$$\n\\begin{eqnarray}\n \\frac{\\partial C}{\\partial w_j} & = & -\\frac{1}{n} \\sum_x \\left(\n \\frac{y }{z} -\\frac{(1-y)}{1-z} \\right)\n \\frac{\\partial z}{\\partial w_j} \\\n & = & -\\frac{1}{n} \\sum_x \\left( \n \\frac{y}{\\sigma(a)} \n -\\frac{(1-y)}{1-\\sigma(a)} \\right)\\sigma'(a) x_j \\\n & = &\n \\frac{1}{n}\n \\sum_x \\frac{\\sigma'(a) x_j}{\\sigma(a) (1-\\sigma(a))}\n (\\sigma(a)-y) \\\n & = & \\frac{1}{n} \\sum_x x_j(\\sigma(a)-y) \\ \n & = & \\frac{1}{n} \\sum_x (z-y) x_j\\ \\\n\\frac{\\partial C}{\\partial b} &=& \\frac{1}{n} \\sum_x (z-y)\n\\end{eqnarray}\n$$\n이 식에서 보다시피 기울기(gradient)가 예측 오차(prediction error) $z-y$에 비례하기 때문에\n오차가 크면 수렴 속도가 빠르고\n오차가 적으면 속도가 감소하여 발산을 방지한다.\n교차 엔트로피 구현 예\nhttps://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/src/network2.py\n```python\nDefine the quadratic and cross-entropy cost functions\nclass QuadraticCost(object):\n@staticmethod\ndef fn(a, y):\n Return the cost associated with an output ``a`` and desired output\n ``y``.\n \n return 0.5*np.linalg.norm(a-y)**2\n@staticmethod\ndef delta(z, a, y):\n Return the error delta from the output layer.\n return (a-y) * sigmoid_prime(z)\nclass CrossEntropyCost(object):\n@staticmethod\ndef fn(a, y):\n Return the cost associated with an output ``a`` and desired output\n ``y``. Note that np.nan_to_num is used to ensure numerical\n stability. In particular, if both ``a`` and ``y`` have a 1.0\n in the same slot, then the expression (1-y)*np.log(1-a)\n returns nan. The np.nan_to_num ensures that that is converted\n to the correct value (0.0).\n \n return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\n@staticmethod\ndef delta(z, a, y):\n Return the error delta from the output layer. Note that the\n parameter ``z`` is not used by the method. It is included in\n the method's parameters in order to make the interface\n consistent with the delta method for other cost classes.\n \n return (a-y)\n```\nEnd of explanation\nfrom ipywidgets import interactive\nfrom IPython.display import Audio, display\ndef softmax_plot(z1=0, z2=0, z3=0, z4=0):\n exps = np.array([np.exp(z1), np.exp(z2), np.exp(z3), np.exp(z4)])\n exp_sum = exps.sum()\n plt.bar(range(len(exps)), exps/exp_sum)\n plt.xlim(-0.3, 4.1)\n plt.ylim(0, 1)\n plt.xticks([])\n \nv = interactive(softmax_plot, z1=(-3, 5, 0.01), z2=(-3, 5, 0.01), z3=(-3, 5, 0.01), z4=(-3, 5, 0.01))\ndisplay(v)\nExplanation: 과최적화 문제\n신경망 모형은 파라미터의 수가 다른 모형에 비해 많다.\n * (28x28)x(30)x(10) => 24,000\n * (28x28)x(100)x(10) => 80,000\n이렇게 파라미터의 수가 많으면 과최적화 발생 가능성이 증가한다. 즉, 정확도가 나아지지 않거나 나빠져도 오차 함수는 계속 감소하는 현상이 발생한다.\n예: \npython\nnet = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost) \nnet.large_weight_initializer()\nnet.SGD(training_data[:1000], 400, 10, 0.5, evaluation_data=test_data, \n monitor_evaluation_accuracy=True, monitor_training_cost=True)\n\n\n\n\nL2 정규화\n이러한 과최적화를 방지하기 위해서는 오차 함수에 다음과 같이 정규화 항목을 추가하여야 한다.\n$$\n\\begin{eqnarray} C = -\\frac{1}{n} \\sum_{j} \\left[ y_j \\ln z^L_j+(1-y_j) \\ln\n(1-z^L_j)\\right] + \\frac{\\lambda}{2n} \\sum_i w_i^2\n\\end{eqnarray}\n$$\n또는 \n$$\n\\begin{eqnarray} C = C_0 + \\frac{\\lambda}{2n}\n\\sum_i w_i^2,\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray} \n \\frac{\\partial C}{\\partial w} & = & \\frac{\\partial C_0}{\\partial w} + \\frac{\\lambda}{n} w \\ \n \\frac{\\partial C}{\\partial b} & = & \\frac{\\partial C_0}{\\partial b}\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray} \n w & \\rightarrow & w-\\eta \\frac{\\partial C_0}{\\partial w}-\\frac{\\eta \\lambda}{n} w \\ \n & = & \\left(1-\\frac{\\eta \\lambda}{n}\\right) w -\\eta \\frac{\\partial C_0}{\\partial w}\n\\end{eqnarray}\n$$\nL2 정규화 구현 예\n`python\ndef total_cost(self, data, lmbda, convert=False):\n Return the total cost for the data setdata. The flagconvertshould be set to False if the data set is the\n training data (the usual case), and to True if the data set is\n the validation or test data. See comments on the similar (but\n reversed) convention for theaccuracy`` method, above.\n \n cost = 0.0\n for x, y in data:\n a = self.feedforward(x)\n if convert: y = vectorized_result(y)\n cost += self.cost.fn(a, y)/len(data)\n cost += 0.5(lmbda/len(data))sum(np.linalg.norm(w)**2 for w in self.weights)\n return cost\ndef update_mini_batch(self, mini_batch, eta, lmbda, n):\n Update the network's weights and biases by applying gradient\n descent using backpropagation to a single mini batch. The\n mini_batch is a list of tuples (x, y), eta is the\n learning rate, lmbda is the regularization parameter, and\n n is the total size of the training data set.\n \n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [(1-eta(lmbda/n))w-(eta/len(mini_batch))nw for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b-(eta/len(mini_batch))nb for b, nb in zip(self.biases, nabla_b)]\n``` \npython\nnet.SGD(training_data[:1000], 400, 10, 0.5, evaluation_data=test_data, lmbda = 0.1,\n monitor_evaluation_cost=True, monitor_evaluation_accuracy=True,\n monitor_training_cost=True, monitor_training_accuracy=True)\n\n\nL1 정규화\nL2 정규화 대신 다음과 같은 L1 정규화를 사용할 수도 있다.\n$$\n\\begin{eqnarray} C = -\\frac{1}{n} \\sum_{j} \\left[ y_j \\ln z^L_j+(1-y_j) \\ln\n(1-z^L_j)\\right] + \\frac{\\lambda}{2n} \\sum_i \\| w_i \\|\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\n\\frac{\\partial C}{\\partial w} = \\frac{\\partial C_0}{\\partial w} + \\frac{\\lambda}{n} \\, {\\rm sgn}(w)\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\nw \\rightarrow w' = w-\\frac{\\eta \\lambda}{n} \\mbox{sgn}(w) - \\eta \\frac{\\partial C_0}{\\partial w}\n\\end{eqnarray}\n$$\nDropout 정규화\nDropout 정규화 방법은 epoch 마다 임의의 hidden layer neurons $100p$%(보통 절반)를 dropout 하여 최적화 과정에 포함하지 않는 방법이다. 이 방법을 사용하면 가중치 값들 값들이 동시에 움직이는 것(co-adaptations) 방지하며 모형 averaging 효과를 가져다 준다.\n\n가중치 갱신이 끝나고 테스트 시점에는 가중치에 $p$를 곱하여 스케일링한다.\n\n가중치 초기화 (Weight initialization)\n뉴런에 대한 입력의 수 $n_{in}$가 증가하면 가중 총합 $a$값의 표준편차도 증가한다.\n $$ \\text{std}(a) \\propto \\sqrt{n_{in}} $$\n \n예를 들어 입력이 1000개, 그 중 절반이 1이면 표준편차는 약 22.4 이 된다.\n $$ \\sqrt{501} \\approx 22.4 $$\n\n이렇게 표준 편가가 크면 수렴이 느려지기 때문에 입력 수에 따라 초기화 가중치의 표준편차를 감소하는 초기화 값 조정이 필요하다.\n$$\\dfrac{1}{\\sqrt{n_{in}} }$$\n가중치 초기화 구현 예\npython\ndef default_weight_initializer(self):\n Initialize each weight using a Gaussian distribution with mean 0\n and standard deviation 1 over the square root of the number of\n weights connecting to the same neuron. Initialize the biases\n using a Gaussian distribution with mean 0 and standard\n deviation 1.\n Note that the first layer is assumed to be an input layer, and\n by convention we won't set any biases for those neurons, since\n biases are only ever used in computing the outputs from later\n layers.\n \n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n\n소프트맥스 출력\n소프트맥스(softmax) 함수는 입력과 출력이 다변수(multiple variable) 인 함수이다. 최고 출력의 위치를 변화하지 않으면서 츨력의 합이 1이 되도록 조정하기 때문에 출력에 확률론적 의미를 부여할 수 있다. 보통 신경망의 최종 출력단에 적용한다.\n$$\n\\begin{eqnarray} \n y^L_j = \\frac{e^{a^L_j}}{\\sum_k e^{a^L_k}},\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\n \\sum_j y^L_j & = & \\frac{\\sum_j e^{a^L_j}}{\\sum_k e^{a^L_k}} = 1\n\\end{eqnarray}\n$$\n\nEnd of explanation\nz = np.linspace(-5, 5, 100)\na = np.tanh(z)\nplt.plot(z, a)\nplt.show()\nExplanation: Hyper-Tangent Activation and Rectified Linear Unit (ReLu) Activation\n시그모이드 함수 이외에도 하이퍼 탄젠트 및 ReLu 함수를 사용할 수도 있다.\n하이퍼 탄젠트 activation 함수는 음수 값을 가질 수 있으며 시그모이드 activation 함수보다 일반적으로 수렴 속도가 빠르다.\n$$\n\\begin{eqnarray}\n \\tanh(w \\cdot x+b), \n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray}\n \\tanh(a) \\equiv \\frac{e^a-e^{-a}}{e^a+e^{-a}}.\n\\end{eqnarray}\n$$\n$$\n\\begin{eqnarray} \n \\sigma(a) = \\frac{1+\\tanh(a/2)}{2},\n\\end{eqnarray}\n$$\nEnd of explanation\nz = np.linspace(-5, 5, 100)\na = np.maximum(z, 0)\nplt.plot(z, a)\nplt.show()\nExplanation: Rectified Linear Unit (ReLu) Activation 함수는 무한대 크기의 activation 값이 가능하며 가중치총합 $a$가 큰 경우에도 기울기(gradient)가 0 이되며 사라지지 않는다는 장점이 있다.\n$$\n\\begin{eqnarray}\n \\max(0, w \\cdot x+b).\n\\end{eqnarray}\n$$\nEnd of explanation"}}},{"rowIdx":2155,"cells":{"Unnamed: 0":{"kind":"number","value":2155,"string":"2,155"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n The Boston Light Swim temperature analysis with Python\nIn the past we demonstrated how to perform a CSW catalog search with OWSLib,\nand how to obtain near real-time data with pyoos.\nIn this notebook we will use both to find all observations and model data around the Boston Harbor to access the sea water temperature.\nThis workflow is part of an example to advise swimmers of the annual Boston lighthouse swim of the Boston Harbor water temperature conditions prior to the race. For more information regarding the workflow presented here see Signell, Richard P.; Fernandes, Filipe; Wilcox, Kyle. 2016. \"Dynamic Reusable Workflows for Ocean Science.\" J. Mar. Sci. Eng. 4, no. 4\nStep1: This notebook is quite big and complex,\nso to help us keep things organized we'll define a cell with the most important options and switches.\nBelow we can define the date,\nbounding box, phenomena SOS and CF names and units,\nand the catalogs we will search.\nStep2: We'll print some of the search configuration options along the way to keep track of them.\nStep3: We already created an OWSLib.fes filter before.\nThe main difference here is that we do not want the atmosphere model data,\nso we are filtering out all the GRIB-2 data format.\nStep4: In the cell below we ask the catalog for all the returns that match the filter and have an OPeNDAP endpoint.\nStep5: We found some models, and observations from NERACOOS there.\nHowever, we do know that there are some buoys from NDBC and CO-OPS available too.\nAlso, those NERACOOS observations seem to be from a CTD mounted at 65 meters below the sea surface. Rendering them useless from our purpose.\nSo let's use the catalog only for the models by filtering the observations with is_station below.\nAnd we'll rely CO-OPS and NDBC services for the observations.\nStep6: Now we can use pyoos collectors for NdbcSos,\nStep7: and CoopsSos.\nStep8: We will join all the observations into an uniform series, interpolated to 1-hour interval, for the model-data comparison.\nThis step is necessary because the observations can be 7 or 10 minutes resolution,\nwhile the models can be 30 to 60 minutes.\nStep9: In this next cell we will save the data for quicker access later.\nStep10: Taking a quick look at the observations\nStep11: Now it is time to loop the models we found above,\nStep12: Next, we will match them with the nearest observed time-series. The max_dist=0.08 is in degrees, that is roughly 8 kilometers.\nStep13: Now it is possible to compute some simple comparison metrics. First we'll calculate the model mean bias\nStep14: And the root mean squared rrror of the deviations from the mean\nStep15: The next 2 cells make the scores \"pretty\" for plotting.\nStep16: The cells from [20] to [25] create a folium map with bokeh for the time-series at the observed points.\nNote that we did mark the nearest model cell location used in the comparison.\nStep17: Here we use a dictionary with some models we expect to find so we can create a better legend for the plots. If any new models are found, we will use its filename in the legend as a default until we can go back and add a short name to our library."},"code_prompt":{"kind":"string","value":"Python Code:\nimport warnings\n# Suppresing warnings for a \"pretty output.\"\nwarnings.simplefilter(\"ignore\")\nExplanation: The Boston Light Swim temperature analysis with Python\nIn the past we demonstrated how to perform a CSW catalog search with OWSLib,\nand how to obtain near real-time data with pyoos.\nIn this notebook we will use both to find all observations and model data around the Boston Harbor to access the sea water temperature.\nThis workflow is part of an example to advise swimmers of the annual Boston lighthouse swim of the Boston Harbor water temperature conditions prior to the race. For more information regarding the workflow presented here see Signell, Richard P.; Fernandes, Filipe; Wilcox, Kyle. 2016. \"Dynamic Reusable Workflows for Ocean Science.\" J. Mar. Sci. Eng. 4, no. 4: 68.\nEnd of explanation\n%%writefile config.yaml\n# Specify a YYYY-MM-DD hh:mm:ss date or integer day offset.\n# If both start and stop are offsets they will be computed relative to datetime.today() at midnight.\n# Use the dates commented below to reproduce the last Boston Light Swim event forecast.\ndate:\n start: -5 # 2016-8-16 00:00:00\n stop: +4 # 2016-8-29 00:00:00\nrun_name: 'latest'\n# Boston harbor.\nregion:\n bbox: [-71.3, 42.03, -70.57, 42.63]\n # Try the bounding box below to see how the notebook will behave for a different region.\n #bbox: [-74.5, 40, -72., 41.5]\n crs: 'urn:ogc:def:crs:OGC:1.3:CRS84'\nsos_name: 'sea_water_temperature'\ncf_names:\n - sea_water_temperature\n - sea_surface_temperature\n - sea_water_potential_temperature\n - equivalent_potential_temperature\n - sea_water_conservative_temperature\n - pseudo_equivalent_potential_temperature\nunits: 'celsius'\ncatalogs:\n - https://data.ioos.us/csw\nExplanation: This notebook is quite big and complex,\nso to help us keep things organized we'll define a cell with the most important options and switches.\nBelow we can define the date,\nbounding box, phenomena SOS and CF names and units,\nand the catalogs we will search.\nEnd of explanation\nimport os\nimport shutil\nfrom datetime import datetime\nfrom ioos_tools.ioos import parse_config\nconfig = parse_config(\"config.yaml\")\n# Saves downloaded data into a temporary directory.\nsave_dir = os.path.abspath(config[\"run_name\"])\nif os.path.exists(save_dir):\n shutil.rmtree(save_dir)\nos.makedirs(save_dir)\nfmt = \"{:*^64}\".format\nprint(fmt(\"Saving data inside directory {}\".format(save_dir)))\nprint(fmt(\" Run information \"))\nprint(\"Run date: {:%Y-%m-%d %H:%M:%S}\".format(datetime.utcnow()))\nprint(\"Start: {:%Y-%m-%d %H:%M:%S}\".format(config[\"date\"][\"start\"]))\nprint(\"Stop: {:%Y-%m-%d %H:%M:%S}\".format(config[\"date\"][\"stop\"]))\nprint(\n \"Bounding box: {0:3.2f}, {1:3.2f},\"\n \"{2:3.2f}, {3:3.2f}\".format(*config[\"region\"][\"bbox\"])\n)\nExplanation: We'll print some of the search configuration options along the way to keep track of them.\nEnd of explanation\ndef make_filter(config):\n from owslib import fes\n from ioos_tools.ioos import fes_date_filter\n kw = dict(\n wildCard=\"*\", escapeChar=\"\\\\\", singleChar=\"?\", propertyname=\"apiso:AnyText\"\n )\n or_filt = fes.Or(\n [fes.PropertyIsLike(literal=(\"*%s*\" % val), **kw) for val in config[\"cf_names\"]]\n )\n not_filt = fes.Not([fes.PropertyIsLike(literal=\"GRIB-2\", **kw)])\n begin, end = fes_date_filter(config[\"date\"][\"start\"], config[\"date\"][\"stop\"])\n bbox_crs = fes.BBox(config[\"region\"][\"bbox\"], crs=config[\"region\"][\"crs\"])\n filter_list = [fes.And([bbox_crs, begin, end, or_filt, not_filt])]\n return filter_list\nfilter_list = make_filter(config)\nExplanation: We already created an OWSLib.fes filter before.\nThe main difference here is that we do not want the atmosphere model data,\nso we are filtering out all the GRIB-2 data format.\nEnd of explanation\nfrom ioos_tools.ioos import get_csw_records, service_urls\nfrom owslib.csw import CatalogueServiceWeb\ndap_urls = []\nprint(fmt(\" Catalog information \"))\nfor endpoint in config[\"catalogs\"]:\n print(\"URL: {}\".format(endpoint))\n try:\n csw = CatalogueServiceWeb(endpoint, timeout=120)\n except Exception as e:\n print(\"{}\".format(e))\n continue\n csw = get_csw_records(csw, filter_list, esn=\"full\")\n OPeNDAP = service_urls(csw.records, identifier=\"OPeNDAP:OPeNDAP\")\n odp = service_urls(\n csw.records, identifier=\"urn:x-esri:specification:ServiceType:odp:url\"\n )\n dap = OPeNDAP + odp\n dap_urls.extend(dap)\n print(\"Number of datasets available: {}\".format(len(csw.records.keys())))\n for rec, item in csw.records.items():\n print(\"{}\".format(item.title))\n if dap:\n print(fmt(\" DAP \"))\n for url in dap:\n print(\"{}.html\".format(url))\n print(\"\\n\")\n# Get only unique endpoints.\ndap_urls = list(set(dap_urls))\nExplanation: In the cell below we ask the catalog for all the returns that match the filter and have an OPeNDAP endpoint.\nEnd of explanation\nfrom ioos_tools.ioos import is_station\nfrom timeout_decorator import TimeoutError\n# Filter out some station endpoints.\nnon_stations = []\nfor url in dap_urls:\n url = f\"{url}#fillmismatch\"\n try:\n if not is_station(url):\n non_stations.append(url)\n except (IOError, OSError, RuntimeError, TimeoutError) as e:\n print(\"Could not access URL {}.html\\n{!r}\".format(url, e))\ndap_urls = non_stations\nprint(fmt(\" Filtered DAP \"))\nfor url in dap_urls:\n print(\"{}.html\".format(url))\nExplanation: We found some models, and observations from NERACOOS there.\nHowever, we do know that there are some buoys from NDBC and CO-OPS available too.\nAlso, those NERACOOS observations seem to be from a CTD mounted at 65 meters below the sea surface. Rendering them useless from our purpose.\nSo let's use the catalog only for the models by filtering the observations with is_station below.\nAnd we'll rely CO-OPS and NDBC services for the observations.\nEnd of explanation\nfrom pyoos.collectors.ndbc.ndbc_sos import NdbcSos\ncollector_ndbc = NdbcSos()\ncollector_ndbc.set_bbox(config[\"region\"][\"bbox\"])\ncollector_ndbc.end_time = config[\"date\"][\"stop\"]\ncollector_ndbc.start_time = config[\"date\"][\"start\"]\ncollector_ndbc.variables = [config[\"sos_name\"]]\nofrs = collector_ndbc.server.offerings\ntitle = collector_ndbc.server.identification.title\nprint(fmt(\" NDBC Collector offerings \"))\nprint(\"{}: {} offerings\".format(title, len(ofrs)))\nimport pandas as pd\nfrom ioos_tools.ioos import collector2table\nndbc = collector2table(\n collector=collector_ndbc, config=config, col=\"sea_water_temperature (C)\"\n)\nif ndbc:\n data = dict(\n station_name=[s._metadata.get(\"station_name\") for s in ndbc],\n station_code=[s._metadata.get(\"station_code\") for s in ndbc],\n sensor=[s._metadata.get(\"sensor\") for s in ndbc],\n lon=[s._metadata.get(\"lon\") for s in ndbc],\n lat=[s._metadata.get(\"lat\") for s in ndbc],\n depth=[s._metadata.get(\"depth\") for s in ndbc],\n )\ntable = pd.DataFrame(data).set_index(\"station_code\")\ntable\nExplanation: Now we can use pyoos collectors for NdbcSos,\nEnd of explanation\nfrom pyoos.collectors.coops.coops_sos import CoopsSos\ncollector_coops = CoopsSos()\ncollector_coops.set_bbox(config[\"region\"][\"bbox\"])\ncollector_coops.end_time = config[\"date\"][\"stop\"]\ncollector_coops.start_time = config[\"date\"][\"start\"]\ncollector_coops.variables = [config[\"sos_name\"]]\nofrs = collector_coops.server.offerings\ntitle = collector_coops.server.identification.title\nprint(fmt(\" Collector offerings \"))\nprint(\"{}: {} offerings\".format(title, len(ofrs)))\ncoops = collector2table(\n collector=collector_coops, config=config, col=\"sea_water_temperature (C)\"\n)\nif coops:\n data = dict(\n station_name=[s._metadata.get(\"station_name\") for s in coops],\n station_code=[s._metadata.get(\"station_code\") for s in coops],\n sensor=[s._metadata.get(\"sensor\") for s in coops],\n lon=[s._metadata.get(\"lon\") for s in coops],\n lat=[s._metadata.get(\"lat\") for s in coops],\n depth=[s._metadata.get(\"depth\") for s in coops],\n )\ntable = pd.DataFrame(data).set_index(\"station_code\")\ntable\nExplanation: and CoopsSos.\nEnd of explanation\ndata = ndbc + coops\nindex = pd.date_range(\n start=config[\"date\"][\"start\"].replace(tzinfo=None),\n end=config[\"date\"][\"stop\"].replace(tzinfo=None),\n freq=\"1H\",\n)\n# Preserve metadata with `reindex`.\nobservations = []\nfor series in data:\n _metadata = series._metadata\n series.index = series.index.tz_localize(None)\n series.index = series.index.tz_localize(None)\n obs = series.reindex(index=index, limit=1, method=\"nearest\")\n obs._metadata = _metadata\n observations.append(obs)\nExplanation: We will join all the observations into an uniform series, interpolated to 1-hour interval, for the model-data comparison.\nThis step is necessary because the observations can be 7 or 10 minutes resolution,\nwhile the models can be 30 to 60 minutes.\nEnd of explanation\nimport iris\nfrom ioos_tools.tardis import series2cube\nattr = dict(\n featureType=\"timeSeries\",\n Conventions=\"CF-1.6\",\n standard_name_vocabulary=\"CF-1.6\",\n cdm_data_type=\"Station\",\n comment=\"Data from http://opendap.co-ops.nos.noaa.gov\",\n)\ncubes = iris.cube.CubeList([series2cube(obs, attr=attr) for obs in observations])\noutfile = os.path.join(save_dir, \"OBS_DATA.nc\")\niris.save(cubes, outfile)\nExplanation: In this next cell we will save the data for quicker access later.\nEnd of explanation\n%matplotlib inline\nax = pd.concat(data).plot(figsize=(11, 2.25))\nExplanation: Taking a quick look at the observations:\nEnd of explanation\nfrom ioos_tools.ioos import get_model_name\nfrom ioos_tools.tardis import get_surface, is_model, proc_cube, quick_load_cubes\nfrom iris.exceptions import ConstraintMismatchError, CoordinateNotFoundError, MergeError\nprint(fmt(\" Models \"))\ncubes = dict()\nfor k, url in enumerate(dap_urls):\n print(\"\\n[Reading url {}/{}]: {}\".format(k + 1, len(dap_urls), url))\n try:\n cube = quick_load_cubes(url, config[\"cf_names\"], callback=None, strict=True)\n if is_model(cube):\n cube = proc_cube(\n cube,\n bbox=config[\"region\"][\"bbox\"],\n time=(config[\"date\"][\"start\"], config[\"date\"][\"stop\"]),\n units=config[\"units\"],\n )\n else:\n print(\"[Not model data]: {}\".format(url))\n continue\n cube = get_surface(cube)\n mod_name = get_model_name(url)\n cubes.update({mod_name: cube})\n except (\n RuntimeError,\n ValueError,\n ConstraintMismatchError,\n CoordinateNotFoundError,\n IndexError,\n ) as e:\n print(\"Cannot get cube for: {}\\n{}\".format(url, e))\nExplanation: Now it is time to loop the models we found above,\nEnd of explanation\nimport iris\nfrom ioos_tools.tardis import (\n add_station,\n ensure_timeseries,\n get_nearest_water,\n make_tree,\n remove_ssh,\n)\nfrom iris.pandas import as_series\nfor mod_name, cube in cubes.items():\n fname = \"{}.nc\".format(mod_name)\n fname = os.path.join(save_dir, fname)\n print(fmt(\" Downloading to file {} \".format(fname)))\n try:\n tree, lon, lat = make_tree(cube)\n except CoordinateNotFoundError:\n print(\"Cannot make KDTree for: {}\".format(mod_name))\n continue\n # Get model series at observed locations.\n raw_series = dict()\n for obs in observations:\n obs = obs._metadata\n station = obs[\"station_code\"]\n try:\n kw = dict(k=10, max_dist=0.08, min_var=0.01)\n args = cube, tree, obs[\"lon\"], obs[\"lat\"]\n try:\n series, dist, idx = get_nearest_water(*args, **kw)\n except RuntimeError as e:\n print(\"Cannot download {!r}.\\n{}\".format(cube, e))\n series = None\n except ValueError:\n status = \"No Data\"\n print(\"[{}] {}\".format(status, obs[\"station_name\"]))\n continue\n if not series:\n status = \"Land \"\n else:\n raw_series.update({station: series})\n series = as_series(series)\n status = \"Water \"\n print(\"[{}] {}\".format(status, obs[\"station_name\"]))\n if raw_series: # Save cube.\n for station, cube in raw_series.items():\n cube = add_station(cube, station)\n cube = remove_ssh(cube)\n try:\n cube = iris.cube.CubeList(raw_series.values()).merge_cube()\n except MergeError as e:\n print(e)\n ensure_timeseries(cube)\n try:\n iris.save(cube, fname)\n except AttributeError:\n # FIXME: we should patch the bad attribute instead of removing everything.\n cube.attributes = {}\n iris.save(cube, fname)\n del cube\n print(\"Finished processing [{}]\".format(mod_name))\nExplanation: Next, we will match them with the nearest observed time-series. The max_dist=0.08 is in degrees, that is roughly 8 kilometers.\nEnd of explanation\nfrom ioos_tools.ioos import stations_keys\ndef rename_cols(df, config):\n cols = stations_keys(config, key=\"station_name\")\n return df.rename(columns=cols)\nfrom ioos_tools.ioos import load_ncs\nfrom ioos_tools.skill_score import apply_skill, mean_bias\ndfs = load_ncs(config)\ndf = apply_skill(dfs, mean_bias, remove_mean=False, filter_tides=False)\nskill_score = dict(mean_bias=df.to_dict())\n# Filter out stations with no valid comparison.\ndf.dropna(how=\"all\", axis=1, inplace=True)\ndf = df.applymap(\"{:.2f}\".format).replace(\"nan\", \"--\")\nExplanation: Now it is possible to compute some simple comparison metrics. First we'll calculate the model mean bias:\n$$ \\text{MB} = \\mathbf{\\overline{m}} - \\mathbf{\\overline{o}}$$\nEnd of explanation\nfrom ioos_tools.skill_score import rmse\ndfs = load_ncs(config)\ndf = apply_skill(dfs, rmse, remove_mean=True, filter_tides=False)\nskill_score[\"rmse\"] = df.to_dict()\n# Filter out stations with no valid comparison.\ndf.dropna(how=\"all\", axis=1, inplace=True)\ndf = df.applymap(\"{:.2f}\".format).replace(\"nan\", \"--\")\nExplanation: And the root mean squared rrror of the deviations from the mean:\n$$ \\text{CRMS} = \\sqrt{\\left(\\mathbf{m'} - \\mathbf{o'}\\right)^2}$$\nwhere: $\\mathbf{m'} = \\mathbf{m} - \\mathbf{\\overline{m}}$ and $\\mathbf{o'} = \\mathbf{o} - \\mathbf{\\overline{o}}$\nEnd of explanation\nimport pandas as pd\n# Stringfy keys.\nfor key in skill_score.keys():\n skill_score[key] = {str(k): v for k, v in skill_score[key].items()}\nmean_bias = pd.DataFrame.from_dict(skill_score[\"mean_bias\"])\nmean_bias = mean_bias.applymap(\"{:.2f}\".format).replace(\"nan\", \"--\")\nskill_score = pd.DataFrame.from_dict(skill_score[\"rmse\"])\nskill_score = skill_score.applymap(\"{:.2f}\".format).replace(\"nan\", \"--\")\nimport folium\nfrom ioos_tools.ioos import get_coordinates\ndef make_map(bbox, **kw):\n line = kw.pop(\"line\", True)\n layers = kw.pop(\"layers\", True)\n zoom_start = kw.pop(\"zoom_start\", 5)\n lon = (bbox[0] + bbox[2]) / 2\n lat = (bbox[1] + bbox[3]) / 2\n m = folium.Map(\n width=\"100%\", height=\"100%\", location=[lat, lon], zoom_start=zoom_start\n )\n if layers:\n url = \"http://oos.soest.hawaii.edu/thredds/wms/hioos/satellite/dhw_5km\"\n w = folium.WmsTileLayer(\n url,\n name=\"Sea Surface Temperature\",\n fmt=\"image/png\",\n layers=\"CRW_SST\",\n attr=\"PacIOOS TDS\",\n overlay=True,\n transparent=True,\n )\n w.add_to(m)\n if line:\n p = folium.PolyLine(\n get_coordinates(bbox), color=\"#FF0000\", weight=2, opacity=0.9,\n )\n p.add_to(m)\n return m\nbbox = config[\"region\"][\"bbox\"]\nm = make_map(bbox, zoom_start=11, line=True, layers=True)\nExplanation: The next 2 cells make the scores \"pretty\" for plotting.\nEnd of explanation\nall_obs = stations_keys(config)\nfrom glob import glob\nfrom operator import itemgetter\nimport iris\nfrom folium.plugins import MarkerCluster\niris.FUTURE.netcdf_promote = True\nbig_list = []\nfor fname in glob(os.path.join(save_dir, \"*.nc\")):\n if \"OBS_DATA\" in fname:\n continue\n cube = iris.load_cube(fname)\n model = os.path.split(fname)[1].split(\"-\")[-1].split(\".\")[0]\n lons = cube.coord(axis=\"X\").points\n lats = cube.coord(axis=\"Y\").points\n stations = cube.coord(\"station_code\").points\n models = [model] * lons.size\n lista = zip(models, lons.tolist(), lats.tolist(), stations.tolist())\n big_list.extend(lista)\nbig_list.sort(key=itemgetter(3))\ndf = pd.DataFrame(big_list, columns=[\"name\", \"lon\", \"lat\", \"station\"])\ndf.set_index(\"station\", drop=True, inplace=True)\ngroups = df.groupby(df.index)\nlocations, popups = [], []\nfor station, info in groups:\n sta_name = all_obs[station]\n for lat, lon, name in zip(info.lat, info.lon, info.name):\n locations.append([lat, lon])\n popups.append(\n \"[{}]: {}\".format(name.rstrip(\"fillmismatch\").rstrip(\"#\"), sta_name)\n )\nMarkerCluster(locations=locations, popups=popups, name=\"Cluster\").add_to(m)\nExplanation: The cells from [20] to [25] create a folium map with bokeh for the time-series at the observed points.\nNote that we did mark the nearest model cell location used in the comparison.\nEnd of explanation\ntitles = {\n \"coawst_4_use_best\": \"COAWST_4\",\n \"global\": \"HYCOM\",\n \"NECOFS_GOM3_FORECAST\": \"NECOFS_GOM3\",\n \"NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST\": \"NECOFS_MassBay\",\n \"OBS_DATA\": \"Observations\",\n}\nfrom itertools import cycle\nfrom bokeh.embed import file_html\nfrom bokeh.models import HoverTool\nfrom bokeh.palettes import Category20\nfrom bokeh.plotting import figure\nfrom bokeh.resources import CDN\nfrom folium import IFrame\n# Plot defaults.\ncolors = Category20[20]\ncolorcycler = cycle(colors)\ntools = \"pan,box_zoom,reset\"\nwidth, height = 750, 250\ndef make_plot(df, station):\n p = figure(\n toolbar_location=\"above\",\n x_axis_type=\"datetime\",\n width=width,\n height=height,\n tools=tools,\n title=str(station),\n )\n for column, series in df.iteritems():\n series.dropna(inplace=True)\n if not series.empty:\n if \"OBS_DATA\" not in column:\n bias = mean_bias[str(station)][column]\n skill = skill_score[str(station)][column]\n line_color = next(colorcycler)\n kw = dict(alpha=0.65, line_color=line_color)\n else:\n skill = bias = \"NA\"\n kw = dict(alpha=1, color=\"crimson\")\n legend = f\"{titles.get(column, column)}\"\n legend = legend.rstrip(\"fillmismatch\").rstrip(\"#\")\n line = p.line(\n x=series.index,\n y=series.values,\n legend=legend,\n line_width=5,\n line_cap=\"round\",\n line_join=\"round\",\n **kw,\n )\n p.add_tools(\n HoverTool(\n tooltips=[\n (\"Name\", \"{}\".format(titles.get(column, column))),\n (\"Bias\", bias),\n (\"Skill\", skill),\n ],\n renderers=[line],\n )\n )\n return p\ndef make_marker(p, station):\n lons = stations_keys(config, key=\"lon\")\n lats = stations_keys(config, key=\"lat\")\n lon, lat = lons[station], lats[station]\n html = file_html(p, CDN, station)\n iframe = IFrame(html, width=width + 40, height=height + 80)\n popup = folium.Popup(iframe, max_width=2650)\n icon = folium.Icon(color=\"green\", icon=\"stats\")\n marker = folium.Marker(location=[lat, lon], popup=popup, icon=icon)\n return marker\ndfs = load_ncs(config)\nfor station in dfs:\n sta_name = all_obs[station]\n df = dfs[station]\n if df.empty:\n continue\n p = make_plot(df, station)\n marker = make_marker(p, station)\n marker.add_to(m)\nfolium.LayerControl().add_to(m)\nm\nExplanation: Here we use a dictionary with some models we expect to find so we can create a better legend for the plots. If any new models are found, we will use its filename in the legend as a default until we can go back and add a short name to our library.\nEnd of explanation"}}},{"rowIdx":2156,"cells":{"Unnamed: 0":{"kind":"number","value":2156,"string":"2,156"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Example 7\nStep1: Part 1\nStep2: Now we will do the timing analysis as well as print out the critical path\nStep3: We are also able to print out the critical paths as well as get them\nback as an array.\nStep4: Part 2\nStep5: Part 3\nStep6: Part 4\nStep7: Now to see the difference"},"code_prompt":{"kind":"string","value":"Python Code:\nimport pyrtl\nExplanation: Example 7: Reduction and Speed Analysis\nAfter building a circuit, one might want to do some stuff to reduce the\nhardware into simpler nets as well as analyze various metrics of the\nhardware. This functionality is provided in the Passes part of PyRTL\nand will demonstrated here.\nEnd of explanation\n# Creating a sample harware block\npyrtl.reset_working_block()\nconst_wire = pyrtl.Const(6, bitwidth=4)\nin_wire2 = pyrtl.Input(bitwidth=4, name=\"input2\")\nout_wire = pyrtl.Output(bitwidth=5, name=\"output\")\nout_wire <<= const_wire + in_wire2\nExplanation: Part 1: Timing Analysis\nTiming and area usage are key considerations of any hardware block that one\nmakes.\nPyRTL provides functions to do these opertions\nEnd of explanation\n# Generating timing analysis information\nprint(\"Pre Synthesis:\")\ntiming = pyrtl.TimingAnalysis()\ntiming.print_max_length()\nExplanation: Now we will do the timing analysis as well as print out the critical path\nEnd of explanation\ncritical_path_info = timing.critical_path()\nExplanation: We are also able to print out the critical paths as well as get them\nback as an array.\nEnd of explanation\nlogic_area, mem_area = pyrtl.area_estimation(tech_in_nm=65)\nest_area = logic_area + mem_area\nprint(\"Estimated Area of block\", est_area, \"sq mm\")\nprint()\nExplanation: Part 2: Area Analysis\nPyRTL also provides estimates for the area that would be used up if the\ncircuit was printed as an ASIC\nEnd of explanation\npyrtl.synthesize()\nprint(\"Pre Optimization:\")\ntiming = pyrtl.TimingAnalysis()\ntiming.print_max_length()\nfor net in pyrtl.working_block().logic:\n print(str(net))\nprint()\nExplanation: Part 3: Synthesis\nSynthesis is the operation of reducing the circuit into simpler components\nThe base synthesis function breaks down the more complex logic operations\ninto logic gates (keeps registers and memories intact) as well as reduces\nall combinatorial logic into ops that only use one bitwidth wires\nThis synthesis allows for PyRTL to make optimizations to the net structure\nas well as prepares it for further transformations on the PyRTL Toolchain\nEnd of explanation\npyrtl.optimize()\nExplanation: Part 4: Optimization\nPyRTL has functions built in to eliminate unnecessary logic from the\ncircuit.\nThese functions are all done with a simple call:\nEnd of explanation\nprint(\"Post Optimization:\")\ntiming = pyrtl.TimingAnalysis()\ntiming.print_max_length()\nfor net in pyrtl.working_block().logic:\n print(str(net))\nExplanation: Now to see the difference\nEnd of explanation"}}},{"rowIdx":2157,"cells":{"Unnamed: 0":{"kind":"number","value":2157,"string":"2,157"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Demo of Max-value Entropy Search Acqusition\nThis notebook provides a demo of the max-value entropy search (MES) acquisition function of Wang et al [2017].\nhttps\nStep1: Set up our toy problem (1D optimisation of the forrester function) and collect 3 initial points.\nStep2: Fit our GP model to the observed data.\nStep3: Lets plot the resulting acqusition functions for the chosen model on the collected data. Note that MES takes a fraction of the time of ES to compute (plotted on a log scale). This difference becomes even more apparent as you increase the dimensions of the sample space."},"code_prompt":{"kind":"string","value":"Python Code:\n### General imports\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors as mcolors\nimport GPy\nimport time\n### Emukit imports\nfrom emukit.test_functions import forrester_function\nfrom emukit.core.loop.user_function import UserFunctionWrapper\nfrom emukit.core import ContinuousParameter, ParameterSpace\nfrom emukit.bayesian_optimization.acquisitions import EntropySearch, ExpectedImprovement, MaxValueEntropySearch\nfrom emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper\n### --- Figure config\nLEGEND_SIZE = 15\nExplanation: Demo of Max-value Entropy Search Acqusition\nThis notebook provides a demo of the max-value entropy search (MES) acquisition function of Wang et al [2017].\nhttps://arxiv.org/pdf/1703.01968.pdf\nMES provides the high perfoming optimization of other entropy-based acquisitions. However, unlike standard entropy-search, MES requires a fraction of the computational cost. The computational savings are due to MES seeking to reduce our uncertainty in the value of the function at the optima (a 1-dimensional quantity) rather than uncertainty in the location of the optima (a d-dimensional quantity). Therefore, MES has a computational cost that scales linearly with the parameter space dimension d. \nOur implementation of MES is controlled by two parameters: \"num_samples\" and \"grid_size\". \"num_samples\" controls how many mote-carlo samples we use to calculate entropy reductions. As we only approximate a 1-d integral, \"num_samples\" does not need to be large or be increased for problems with large d (unlike standard entropy-search). We recomend values between 5-15. \"grid_size\" controls the coarseness of the grid used to approximate the distribution of our max value and so must increase with d. We recommend 10,000*d. Note that as the grid must only be calculated once per BO step, the choice of \"grid_size\" does not have a large impact on computation time.\nEnd of explanation\ntarget_function, space = forrester_function()\nx_plot = np.linspace(space.parameters[0].min, space.parameters[0].max, 200)[:, None]\ny_plot = target_function(x_plot)\nX_init = np.array([[0.2],[0.6], [0.9]])\nY_init = target_function(X_init)\nplt.figure(figsize=(12, 8))\nplt.plot(x_plot, y_plot, \"k\", label=\"Objective Function\")\nplt.scatter(X_init,Y_init)\nplt.legend(loc=2, prop={'size': LEGEND_SIZE})\nplt.xlabel(r\"$x$\")\nplt.ylabel(r\"$f(x)$\")\nplt.grid(True)\nplt.xlim(0, 1)\nplt.show()\nExplanation: Set up our toy problem (1D optimisation of the forrester function) and collect 3 initial points.\nEnd of explanation\ngpy_model = GPy.models.GPRegression(X_init, Y_init, GPy.kern.RBF(1, lengthscale=0.08, variance=20), noise_var=1e-10)\nemukit_model = GPyModelWrapper(gpy_model)\nExplanation: Fit our GP model to the observed data.\nEnd of explanation\nei_acquisition = ExpectedImprovement(emukit_model)\nes_acquisition = EntropySearch(emukit_model,space)\nmes_acquisition = MaxValueEntropySearch(emukit_model,space)\nt_0=time.time()\nei_plot = ei_acquisition.evaluate(x_plot)\nt_ei=time.time()-t_0\nes_plot = es_acquisition.evaluate(x_plot)\nt_es=time.time()-t_ei\nmes_plot = mes_acquisition.evaluate(x_plot)\nt_mes=time.time()-t_es\nplt.figure(figsize=(12, 8))\nplt.plot(x_plot, (es_plot - np.min(es_plot)) / (np.max(es_plot) - np.min(es_plot)), \"green\", label=\"Entropy Search\")\nplt.plot(x_plot, (ei_plot - np.min(ei_plot)) / (np.max(ei_plot) - np.min(ei_plot)), \"blue\", label=\"Expected Improvement\")\nplt.plot(x_plot, (mes_plot - np.min(mes_plot)) / (np.max(mes_plot) - np.min(mes_plot)), \"red\", label=\"Max Value Entropy Search\")\nplt.legend(loc=1, prop={'size': LEGEND_SIZE})\nplt.xlabel(r\"$x$\")\nplt.ylabel(r\"$f(x)$\")\nplt.grid(True)\nplt.xlim(0, 1)\nplt.show()\nplt.figure(figsize=(12, 8))\nplt.bar([\"ei\",\"es\",\"mes\"],[t_ei,t_es,t_mes])\nplt.xlabel(\"Acquisition Choice\")\nplt.yscale('log')\nplt.ylabel(\"Calculation Time (secs)\")\nExplanation: Lets plot the resulting acqusition functions for the chosen model on the collected data. Note that MES takes a fraction of the time of ES to compute (plotted on a log scale). This difference becomes even more apparent as you increase the dimensions of the sample space.\nEnd of explanation"}}},{"rowIdx":2158,"cells":{"Unnamed: 0":{"kind":"number","value":2158,"string":"2,158"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Copyright 2019 The TensorFlow Probability Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nStep1: Bayesian Switchpoint Analysis\n\n \n \n \n \n
\n \n \n View on TensorFlow.org\n \n Run in Google Colab\n \n View source on GitHub\n \n Download notebook\n
\nThis notebook reimplements and extends the Bayesian “Change point analysis” example from the pymc3 documentation.\nPrerequisites\nEnd of explanation\ndisaster_data = np.array([ 4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,\n 3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,\n 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,\n 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,\n 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,\n 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\nyears = np.arange(1851, 1962)\nplt.plot(years, disaster_data, 'o', markersize=8);\nplt.ylabel('Disaster count')\nplt.xlabel('Year')\nplt.title('Mining disaster data set')\nplt.show()\nExplanation: Dataset\nThe dataset is from here. Note, there is another version of this example floating around, but it has “missing” data – in which case you’d need to impute missing values. (Otherwise your model will not ever leave its initial parameters because the likelihood function will be undefined.)\nEnd of explanation\ndef disaster_count_model(disaster_rate_fn):\n disaster_count = tfd.JointDistributionNamed(dict(\n e=tfd.Exponential(rate=1.),\n l=tfd.Exponential(rate=1.),\n s=tfd.Uniform(0., high=len(years)),\n d_t=lambda s, l, e: tfd.Independent(\n tfd.Poisson(rate=disaster_rate_fn(np.arange(len(years)), s, l, e)),\n reinterpreted_batch_ndims=1)\n ))\n return disaster_count\ndef disaster_rate_switch(ys, s, l, e):\n return tf.where(ys < s, e, l)\ndef disaster_rate_sigmoid(ys, s, l, e):\n return e + tf.sigmoid(ys - s) * (l - e)\nmodel_switch = disaster_count_model(disaster_rate_switch)\nmodel_sigmoid = disaster_count_model(disaster_rate_sigmoid)\nExplanation: Probabilistic Model\nThe model assumes a “switch point” (e.g. a year during which safety regulations changed), and Poisson-distributed disaster rate with constant (but potentially different) rates before and after that switch point.\nThe actual disaster count is fixed (observed); any sample of this model will need to specify both the switchpoint and the “early” and “late” rate of disasters.\nOriginal model from pymc3 documentation example:\n$$\n\\begin{align}\n(D_t|s,e,l)&\\sim \\text{Poisson}(r_t), \\\n & \\,\\quad\\text{with}\\; r_t = \\begin{cases}e & \\text{if}\\; t < s\\l &\\text{if}\\; t \\ge s\\end{cases} \\\ns&\\sim\\text{Discrete Uniform}(t_l,\\,t_h) \\\ne&\\sim\\text{Exponential}(r_e)\\\nl&\\sim\\text{Exponential}(r_l)\n\\end{align}\n$$\nHowever, the mean disaster rate $r_t$ has a discontinuity at the switchpoint $s$, which makes it not differentiable. Thus it provides no gradient signal to the Hamiltonian Monte Carlo (HMC) algorithm – but because the $s$ prior is continuous, HMC’s fallback to a random walk is good enough to find the areas of high probability mass in this example.\nAs a second model, we modify the original model using a sigmoid “switch” between e and l to make the transition differentiable, and use a continuous uniform distribution for the switchpoint $s$. (One could argue this model is more true to reality, as a “switch” in mean rate would likely be stretched out over multiple years.) The new model is thus:\n$$\n\\begin{align}\n(D_t|s,e,l)&\\sim\\text{Poisson}(r_t), \\\n & \\,\\quad \\text{with}\\; r_t = e + \\frac{1}{1+\\exp(s-t)}(l-e) \\\ns&\\sim\\text{Uniform}(t_l,\\,t_h) \\\ne&\\sim\\text{Exponential}(r_e)\\\nl&\\sim\\text{Exponential}(r_l)\n\\end{align}\n$$\nIn the absence of more information we assume $r_e = r_l = 1$ as parameters for the priors. We’ll run both models and compare their inference results.\nEnd of explanation\ndef target_log_prob_fn(model, s, e, l):\n return model.log_prob(s=s, e=e, l=l, d_t=disaster_data)\nmodels = [model_switch, model_sigmoid]\nprint([target_log_prob_fn(m, 40., 3., .9).numpy() for m in models]) # Somewhat likely result\nprint([target_log_prob_fn(m, 60., 1., 5.).numpy() for m in models]) # Rather unlikely result\nprint([target_log_prob_fn(m, -10., 1., 1.).numpy() for m in models]) # Impossible result\nExplanation: The above code defines the model via JointDistributionSequential distributions. The disaster_rate functions are called with an array of [0, ..., len(years)-1] to produce a vector of len(years) random variables – the years before the switchpoint are early_disaster_rate, the ones after late_disaster_rate (modulo the sigmoid transition).\nHere is a sanity-check that the target log prob function is sane:\nEnd of explanation\nnum_results = 10000\nnum_burnin_steps = 3000\n@tf.function(autograph=False, jit_compile=True)\ndef make_chain(target_log_prob_fn):\n kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=0.05,\n num_leapfrog_steps=3),\n bijector=[\n # The switchpoint is constrained between zero and len(years).\n # Hence we supply a bijector that maps the real numbers (in a\n # differentiable way) to the interval (0;len(yers))\n tfb.Sigmoid(low=0., high=tf.cast(len(years), dtype=tf.float32)),\n # Early and late disaster rate: The exponential distribution is\n # defined on the positive real numbers\n tfb.Softplus(),\n tfb.Softplus(),\n ])\n kernel = tfp.mcmc.SimpleStepSizeAdaptation(\n inner_kernel=kernel,\n num_adaptation_steps=int(0.8*num_burnin_steps))\n states = tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=num_burnin_steps,\n current_state=[\n # The three latent variables\n tf.ones([], name='init_switchpoint'),\n tf.ones([], name='init_early_disaster_rate'),\n tf.ones([], name='init_late_disaster_rate'),\n ],\n trace_fn=None,\n kernel=kernel)\n return states\nswitch_samples = [s.numpy() for s in make_chain(\n lambda *args: target_log_prob_fn(model_switch, *args))]\nsigmoid_samples = [s.numpy() for s in make_chain(\n lambda *args: target_log_prob_fn(model_sigmoid, *args))]\nswitchpoint, early_disaster_rate, late_disaster_rate = zip(\n switch_samples, sigmoid_samples)\nExplanation: HMC to do Bayesian inference\nWe define the number of results and burn-in steps required; the code is mostly modeled after the documentation of tfp.mcmc.HamiltonianMonteCarlo. It uses an adaptive step size (otherwise the outcome is very sensitive to the step size value chosen). We use values of one as the initial state of the chain.\nThis is not the full story though. If you go back to the model definition above, you’ll note that some of the probability distributions are not well-defined on the whole real number line. Therefore we constrain the space that HMC shall examine by wrapping the HMC kernel with a TransformedTransitionKernel that specifies the forward bijectors to transform the real numbers onto the domain that the probability distribution is defined on (see comments in the code below).\nEnd of explanation\ndef _desc(v):\n return '(median: {}; 95%ile CI: $[{}, {}]$)'.format(\n *np.round(np.percentile(v, [50, 2.5, 97.5]), 2))\nfor t, v in [\n ('Early disaster rate ($e$) posterior samples', early_disaster_rate),\n ('Late disaster rate ($l$) posterior samples', late_disaster_rate),\n ('Switch point ($s$) posterior samples', years[0] + switchpoint),\n]:\n fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True)\n for (m, i) in (('Switch', 0), ('Sigmoid', 1)):\n a = ax[i]\n a.hist(v[i], bins=50)\n a.axvline(x=np.percentile(v[i], 50), color='k')\n a.axvline(x=np.percentile(v[i], 2.5), color='k', ls='dashed', alpha=.5)\n a.axvline(x=np.percentile(v[i], 97.5), color='k', ls='dashed', alpha=.5)\n a.set_title(m + ' model ' + _desc(v[i]))\n fig.suptitle(t)\n plt.show()\nExplanation: Run both models in parallel:\nVisualize the result\nWe visualize the result as histograms of samples of the posterior distribution for the early and late disaster rate, as well as the switchpoint. The histograms are overlaid with a solid line representing the sample median, as well as the 95%ile credible interval bounds as dashed lines.\nEnd of explanation"}}},{"rowIdx":2159,"cells":{"Unnamed: 0":{"kind":"number","value":2159,"string":"2,159"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Probability Calibration with SplineCalib\nThis workbook demonstrates the SplineCalib algorithm detailed in the paper\n\"Spline-Based Probability Calibration\" https\nStep1: In the next few cells, we load in some data, inspect it, select columns for our features and outcome (mortality) and fill in missing values with the median of that column.\nStep2: Now we divide the data into training, calibration, and test sets. The training set will be used to fit the model, the calibration set will be used to calibrate the probabilities, and the test set will be used to evaluate the performance. We use a 60-20-20 split (achived by first doing 80/20 and then splitting the 80 by 75/25)\nStep3: Next, we fit a Random Forest model to our training data. Then we use that model to predict \"probabilities\" on our validation and test sets. \nI use quotes on \"probabilities\" because these numbers, which are the percentage of trees that voted \"yes\" are better understood as mere scores. A higher value should generally indicate a higher probability of mortality. However, there is no reason to expect these to be well-calibrated probabilities. The fact that, say, 60% of the trees voted \"yes\" on a particular case does not mean that that case has a 60% probability of mortality.\nWe will demonstrate this empirically later.\nStep4: Model Evaluation\nYou are pretty happy with this model since it has an AUROC of .859. But someone asks you if the probability is well-calibrated. In other words, if we looked at all the time your model predicted a mortality probability of 40%, did around 40% of them actually die? Or was it 20%, or 80%? It turns our that AUROC just measures the ranking of cases and does not evaluate if the probabilities are meaningful. In fact, if you multiply all of your predicted probabilities by .1, you would still get the same AUROC.\nChecking Calibration\nHow do we know if a model is well-calibrated? One way to check is to create a \"Reliability Diagram\". The idea behind the reliability diagram is the following\nStep5: Above, we see that the model is largely under-predicting the probability of mortality in the range .35 to .85. For example, when the model predicts a probability of between .6 and .65, more than 80% of those patients died. And the error bars indicate that this is not likely due to random error. In other words, our model is poorly calibrated.\nCalibrating a Model\nSince our current model is not well-calibrated, we would like to fix this. We want that when our model says 60% chance of mortality, it means 60% and not 40% or 80%. We will discuss two ways to fix this\nStep6: From the above, we see that not only do our reliability diagrams look better, but our log_loss values have substantially improved. Log_loss measures not only the discriminative power of the model but also how well-calibrated it is.\nApproach 2\nStep7: We see above that the cross-validated approach gives similar performance (slightly better in this case). Additionally, we did not use the 20% of data set aside for calibration at all in the second approach. We could use approach 2 on the entire training and calibration data and (presumably) get an even better model.\nStep8: Indeed, we get a slightly better AUC and log_loss both before and after calibration, due to having a larger training set for our model to learn from\nSerializing Models\nThe SplineCalib object can be saved to disk easily with joblib.dump() and reloaded with joblib.load()\nStep9: Comparison to Other Calibration Approaches\nHere we compare SplineCalib to Isotonic Regression, Platt Scaling and Beta Calibration."},"code_prompt":{"kind":"string","value":"Python Code:\n# \"pip install ml_insights\" in terminal if needed\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ml_insights as mli\n%matplotlib inline\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss, brier_score_loss, roc_auc_score\nmli.__version__\nExplanation: Probability Calibration with SplineCalib\nThis workbook demonstrates the SplineCalib algorithm detailed in the paper\n\"Spline-Based Probability Calibration\" https://arxiv.org/abs/1809.07751\nWe build a random forest model and demonstrate that using the vote percentage as a probability is not well-calibrated. We then show different approaches on how to use SplineCalib to appropriately calibrate the model.\nWe also show how to serialize the calibration object to be able to save it on disk and re-use it.\nMIMIC ICU Data*\nWe illustrate this process using a mortality model on the MIMIC ICU data\n*MIMIC-III, a freely accessible critical care database. Johnson AEW, Pollard TJ, Shen L, Lehman L, Feng M, Ghassemi M, Moody B, Szolovits P, Celi LA, and Mark RG. Scientific Data (2016).\nhttps://mimic.physionet.org\nEnd of explanation\n# Load dataset derived from the MMIC database\nlab_aug_df = pd.read_csv(\"data/lab_vital_icu_table.csv\")\nlab_aug_df.head(10)\n# Choose a subset of variables\nX = lab_aug_df.loc[:,['aniongap_min', 'aniongap_max',\n 'albumin_min', 'albumin_max', 'bicarbonate_min', 'bicarbonate_max',\n 'bilirubin_min', 'bilirubin_max', 'creatinine_min', 'creatinine_max',\n 'chloride_min', 'chloride_max', \n 'hematocrit_min', 'hematocrit_max', 'hemoglobin_min', 'hemoglobin_max',\n 'lactate_min', 'lactate_max', 'platelet_min', 'platelet_max',\n 'potassium_min', 'potassium_max', 'ptt_min', 'ptt_max', 'inr_min',\n 'inr_max', 'pt_min', 'pt_max', 'sodium_min', 'sodium_max', 'bun_min',\n 'bun_max', 'wbc_min', 'wbc_max','sysbp_max', 'sysbp_mean', 'diasbp_min', 'diasbp_max', 'diasbp_mean',\n 'meanbp_min', 'meanbp_max', 'meanbp_mean', 'resprate_min',\n 'resprate_max', 'resprate_mean', 'tempc_min', 'tempc_max', 'tempc_mean',\n 'spo2_min', 'spo2_max', 'spo2_mean']]\ny = lab_aug_df['hospital_expire_flag']\n# Impute the median for in each column to replace NA's \nmedian_vec = [X.iloc[:,i].median() for i in range(len(X.columns))]\nfor i in range(len(X.columns)):\n X.iloc[:,i].fillna(median_vec[i],inplace=True)\nExplanation: In the next few cells, we load in some data, inspect it, select columns for our features and outcome (mortality) and fill in missing values with the median of that column.\nEnd of explanation\nX_train_calib, X_test, y_train_calib, y_test = train_test_split(X, y, test_size=0.2, random_state=942)\nX_train, X_calib, y_train, y_calib = train_test_split(X_train_calib, y_train_calib, test_size=0.25, random_state=942)\nX_train.shape, X_calib.shape, X_test.shape\nExplanation: Now we divide the data into training, calibration, and test sets. The training set will be used to fit the model, the calibration set will be used to calibrate the probabilities, and the test set will be used to evaluate the performance. We use a 60-20-20 split (achived by first doing 80/20 and then splitting the 80 by 75/25)\nEnd of explanation\nrfmodel1 = RandomForestClassifier(n_estimators = 500, class_weight='balanced_subsample', random_state=942, n_jobs=-1 )\nrfmodel1.fit(X_train,y_train)\npreds_test_uncalib = rfmodel1.predict_proba(X_test)[:,1]\npreds_test_uncalib[:10]\nroc_auc_score(y_test, preds_test_uncalib), roc_auc_score(y_test, .1*preds_test_uncalib)\nExplanation: Next, we fit a Random Forest model to our training data. Then we use that model to predict \"probabilities\" on our validation and test sets. \nI use quotes on \"probabilities\" because these numbers, which are the percentage of trees that voted \"yes\" are better understood as mere scores. A higher value should generally indicate a higher probability of mortality. However, there is no reason to expect these to be well-calibrated probabilities. The fact that, say, 60% of the trees voted \"yes\" on a particular case does not mean that that case has a 60% probability of mortality.\nWe will demonstrate this empirically later.\nEnd of explanation\nmli.plot_reliability_diagram(y_test, preds_test_uncalib, marker='.')\nExplanation: Model Evaluation\nYou are pretty happy with this model since it has an AUROC of .859. But someone asks you if the probability is well-calibrated. In other words, if we looked at all the time your model predicted a mortality probability of 40%, did around 40% of them actually die? Or was it 20%, or 80%? It turns our that AUROC just measures the ranking of cases and does not evaluate if the probabilities are meaningful. In fact, if you multiply all of your predicted probabilities by .1, you would still get the same AUROC.\nChecking Calibration\nHow do we know if a model is well-calibrated? One way to check is to create a \"Reliability Diagram\". The idea behind the reliability diagram is the following:\n- Bin the interval [0,1] into smaller subsets (e.g. [0, 0.05], [0.05, .1], ... [.95,1])\n- Find the empirical probabilities when the probabilities fell into each bin (if there were 20 times, and 9 of them were \"yes\", the empirical probability is .45)\n- Plot the predicted probability (average of predicted probabilities in each bin) (x-axis) vs the empirical probabilities(y-axis)\n- put error bars based on the size of the bin\n- When the dots are (significantly) above the line y=x, the model is under-predicting the true probability, if below the line, model is over-predicting the true probability.\nEnd of explanation\n# Define SplineCalib object\ncalib1 = mli.SplineCalib()\n# Use the model to make predictions on the calibration set\npreds_cset = rfmodel1.predict_proba(X_calib)[:,1]\n# Fit the calibration object on the calibration set\ncalib1.fit(preds_cset, y_calib)\n# Visually inspect the quality of the calibration on the calibration set\nmli.plot_reliability_diagram(y_calib, preds_cset);\ncalib1.show_calibration_curve()\n# Visually inspect the quality of the calibration on the test set\ncalib1.show_calibration_curve()\nmli.plot_reliability_diagram(y_test, preds_test_uncalib);\ncalib1.show_spline_reg_plot()\n# Calibrate the previously generated predictions from the model on the test set\npreds_test_calib1 = calib1.calibrate(preds_test_uncalib)\n# Visually inspect the calibration of the newly calibrated predictions\nmli.plot_reliability_diagram(y_test, preds_test_calib1);\n## Compare the log_loss values\nlog_loss(y_test, preds_test_uncalib),log_loss(y_test, preds_test_calib1)\nExplanation: Above, we see that the model is largely under-predicting the probability of mortality in the range .35 to .85. For example, when the model predicts a probability of between .6 and .65, more than 80% of those patients died. And the error bars indicate that this is not likely due to random error. In other words, our model is poorly calibrated.\nCalibrating a Model\nSince our current model is not well-calibrated, we would like to fix this. We want that when our model says 60% chance of mortality, it means 60% and not 40% or 80%. We will discuss two ways to fix this:\nUse an independent calibration set\nUsing Cross-validation to generate scores from the training set.\nThe first method is simpler, but requires a separate data set, meaning that you will have less data to train your model with. It is good to use if you have plenty of data. It is also a useful approach if you think your distribution has \"shifted\" but the underlying signal in the model is fundamentally unchanged. In some cases it may make sense to \"re-calibrate\" a model on the \"current\" population without doing a full re-training.\nThe second approach takes more time, but is generally more data-efficient. We generate a set of cross-validated predictions on the training data. These predictions come from models that are close to, but not exactly identical to, your original model. However, this small disrepancy is usually minor and the calibration approach works well. For details, see the \"Spline-Based Probability Calibration\" paper referenced above.\nApproach 1: Independent validation set\nFirst let us demostrate how we would fix this using the independent validation set.\nSplineCalib object\nThe SplineCalib object is similar in spirit to preprocessors / data transformations in scikit-learn. The two main operations are fit and calibrate (akin to fit and transform in sklearn).\nTo fit a calibration object, we give it a set of uncalibrated predictions from a model, and the corresponding truth set. The fit routine will learn the spline curve that best maps the uncalibrated scores to actual probabilities.\nEnd of explanation\n# Get the cross validated predictions given a model and training data.\ncv_preds_train = mli.cv_predictions(rfmodel1, X_train, y_train, clone_model=True)\ncalib2 = mli.SplineCalib()\ncalib2.fit(cv_preds_train, y_train)\n# Show the reliability diagram for the cross-validated predictions, and the calibration curve\ncalib2.show_calibration_curve()\nmli.plot_reliability_diagram(y_train, cv_preds_train[:,1]);\nmli.plot_reliability_diagram(y_test, calib2.calibrate(preds_test_uncalib));\npreds_test_calib2 = calib2.calibrate(preds_test_uncalib)\nlog_loss(y_test, preds_test_uncalib), log_loss(y_test, preds_test_calib2)\nExplanation: From the above, we see that not only do our reliability diagrams look better, but our log_loss values have substantially improved. Log_loss measures not only the discriminative power of the model but also how well-calibrated it is.\nApproach 2: Cross-validation on the training data\nThe reason to use an independent calibration set (rather than just the training data) is that how the model performs on the training data (that it has already seen) is not indicative of how it will behave on data it has not seen before. We want the calibration to correct how the model will behave on \"new\" data, not the training data.\nAnother approach is to take a cross-validation approach to generating calibration data. We divide the training data into k \"folds\", leave one fold out, train our model (i.e. the choice of model and hyperparameter settings) on the remaining k-1 folds, and then make predictions on the left-out fold. After doing this process k times, each time leaving out a different fold, we will have a set of predictions, each of which was generated by 1 of k slightly different models, but was always generated by a model that did not see that training point. Done properly (assuming no \"leakage\" across the folds), this set of predictions and answers will serve as an appropriate calibration set.\nML-Insights (the package containing SplineCalib, as well as other functionality) has a simple function to generate these cross-validated predictions. We demonstrate it below.\nEnd of explanation\nrfmodel2 = RandomForestClassifier(n_estimators = 500, class_weight='balanced_subsample', random_state=942, n_jobs=-1 )\nrfmodel2.fit(X_train_calib,y_train_calib)\npreds_test_2_uncalib = rfmodel2.predict_proba(X_test)[:,1]\n# Get the cross validated predictions given a model and training data.\ncv_preds_train_calib = mli.cv_predictions(rfmodel2, X_train_calib, y_train_calib, stratified=True, clone_model=True)\ncalib3 = mli.SplineCalib()\ncalib3.fit(cv_preds_train_calib, y_train_calib)\n# Show the reliability diagram for the cross-validated predictions, and the calibration curve\ncalib3.show_calibration_curve()\nmli.plot_reliability_diagram(y_train_calib, cv_preds_train_calib[:,1]);\npreds_test_calib3 = calib3.calibrate(preds_test_2_uncalib)\nlog_loss(y_test, preds_test_2_uncalib), log_loss(y_test, preds_test_calib3)\nroc_auc_score(y_test, preds_test_2_uncalib), roc_auc_score(y_test, preds_test_calib3)\nExplanation: We see above that the cross-validated approach gives similar performance (slightly better in this case). Additionally, we did not use the 20% of data set aside for calibration at all in the second approach. We could use approach 2 on the entire training and calibration data and (presumably) get an even better model.\nEnd of explanation\nimport joblib\njoblib.dump(calib3, 'calib3.pkl')\ncalib3_reloaded=joblib.load('calib3.pkl')\nmli.plot_reliability_diagram(y_test, calib3_reloaded.calibrate(preds_test_2_uncalib));\ncalib3_reloaded.show_calibration_curve()\nlog_loss(y_test, calib3_reloaded.calibrate(preds_test_2_uncalib))\nExplanation: Indeed, we get a slightly better AUC and log_loss both before and after calibration, due to having a larger training set for our model to learn from\nSerializing Models\nThe SplineCalib object can be saved to disk easily with joblib.dump() and reloaded with joblib.load()\nEnd of explanation\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom betacal import BetaCalibration\n# Fit three-parameter beta calibration\nbc = BetaCalibration(parameters=\"abm\")\nbc.fit(cv_preds_train_calib[:,1], y_train_calib)\n# Fit Isotonic Regression\niso = IsotonicRegression()\niso.fit(cv_preds_train_calib[:,1], y_train_calib)\n# Fit Platt scaling (logistic calibration)\nlr = LogisticRegression(C=99999999999)\nlr.fit(cv_preds_train_calib[:,1].reshape(-1,1), y_train_calib)\ntvec = np.linspace(0,1,1001)\nbc_probs = bc.predict(tvec)\niso_probs = iso.predict(tvec)\nplatt_probs = lr.predict_proba(tvec.reshape(-1,1))[:,1]\nsplinecalib_probs = calib3.calibrate(tvec)\n#calib3.show_calibration_curve()\nmli.plot_reliability_diagram(y_train_calib, cv_preds_train_calib[:,1], error_bars=False);\nplt.plot(tvec, splinecalib_probs, label='SplineCalib')\nplt.plot(tvec, bc_probs, label='Beta')\nplt.plot(tvec, iso_probs, label='Isotonic')\nplt.plot(tvec, platt_probs, label='Platt')\nplt.legend()\nplt.title('Calibration Curves for different methods');\npreds_test_bc = bc.predict(preds_test_2_uncalib)\npreds_test_iso = iso.predict(preds_test_2_uncalib)\npreds_test_platt = lr.predict_proba(preds_test_2_uncalib.reshape(-1,1))[:,1]\npreds_test_splinecalib = calib3.calibrate(preds_test_2_uncalib)\nbc_loss = log_loss(y_test, preds_test_bc)\niso_loss = log_loss(y_test, preds_test_iso)\nplatt_loss = log_loss(y_test, preds_test_platt)\nsplinecalib_loss = log_loss(y_test, preds_test_splinecalib)\nprint('Platt loss = {}'.format(np.round(platt_loss,5)))\nprint('Beta Calib loss = {}'.format(np.round(bc_loss,5)))\nprint('Isotonic loss = {}'.format(np.round(iso_loss,5)))\nprint('SplineCalib loss = {}'.format(np.round(splinecalib_loss,5)))\nExplanation: Comparison to Other Calibration Approaches\nHere we compare SplineCalib to Isotonic Regression, Platt Scaling and Beta Calibration.\nEnd of explanation"}}},{"rowIdx":2160,"cells":{"Unnamed: 0":{"kind":"number","value":2160,"string":"2,160"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Some Multiplicative Functionals\nDaisuke Oyama, Thomas J. Sargent and John Stachurski\nStep1: Plan of the notebook\nIn other quant-econ lectures\n(\"Markov Asset Pricing\" and\n\"The Lucas Asset Pricing Model\"),\nwe have studied the celebrated Lucas asset pricing model (Lucas (1978)) that is cast in a setting in which the key objects of the theory,\nnamely, a stochastic discount factor process, an aggregate consumption process, and an\nasset payout process, are all taken to be stationary.\nIn this notebook, we shall learn about some tools that allow us to extend asset pricing models to settings in which neither the stochastic discount factor process nor the asset's payout process is a stationary process. The key tool is the class of multiplicative functionals from the stochastic process literature that Hansen and Scheinkman (2009) have adapted so that they can be applied to asset pricing and other interesting macroeconomic problems. \nIn this notebook, we confine ourselves to studying a special type of multiplicative functional, namely, multiplicative functionals driven by finite state Markov chains. We'll learn about some of their properties and applications. Among other things, we'll\nobtain Hansen and Scheinkman's more general multiplicative decomposition of our particular type of multiplicative functional into the following three primitive types of multiplicative functions\nStep2: Clearly, this Markov chain is irreducible\nStep3: Create a MultFunctionalFiniteMarkov instance\nStep4: The dominant eigenvalue, denoted $\\exp(\\eta)$ above, of $\\widetilde P$ is\nStep5: The value $\\eta$ is\nStep6: The (normalized) dominant eigenvector $e$ of $\\widetilde P$ is\nStep7: Let us simulate our MultFunctionalFiniteMarkov\nStep8: The simulation results are contained in res.\nLet's check that M and M_tilde satisfy the identity from their definition (up to numerical errors).\nStep9: Likelihood ratio processes\nA likelihood ratio process is a multiplicative martingale with mean $1$. \nA multiplicative martingale process ${\\widetilde M_t }_{t=0}^\\infty$ that starts from $\\widetilde M_0 = 1$ is a likelihood ratio process.\nEvidently, a likelihood ratio process satisfies\n$$ E [\\widetilde M_t \\mid {\\mathfrak F}_0] = 1 .$$\nHansen and Sargent (2017) point out that likelihood ratio processes have the following peculiar property\nStep10: We revisit the peculiar sample path property at the end of this notebook.\nStochastic discount factor and exponentially changing asset payouts\nDefine a matrix ${\\sf S}$ whose $(x, y)$th element is ${\\sf S}(x,y) = \\exp(G_S(x,y))$, where $G_S(x,y)$ is a stochastic discount rate\nfor moving from state $x$ at time $t$ to state $y$ at time $t+1$.\nA stochastic discount factor process ${S_t}_{t=0}^\\infty$ is governed by the multiplicative functional\nStep11: (1) Display the $\\widetilde M$ matrices for $S_t$ and $d_t$.\nStep12: (2) Plot sample paths of $S_t$ and $d_t$.\nStep13: (2) Print $v$.\nStep14: (3) Plot sample paths of $p_t$ and $d_t$.\nStep15: (5) Experiment with a different $G_S$ matrix.\nStep16: Lucas asset pricing model with growth\nAs an example of our model of a stochastic discount factor and payout process, we'll adapt a version of the famous Lucas (1978) asset pricing model to have an exponentially growing aggregate consumption endowment.\nWe'll use CRRA utility\n$u(c) = c^{1-\\gamma}/(1-\\gamma)$,\na common specification in applications of the Lucas model.\nSo now we let $d_t = C_t$, aggregate consumption, and we let \n$$\\frac{S_{t+1}}{S_t} = \\exp(-\\delta) \\left(\\frac{C_{t+1}}{C_t} \\right)^{-\\gamma} ,$$\nwhere $\\delta > 0$ is a rate of time preference and $\\gamma > 0$ is a coefficient of relative risk aversion. \nTo obtain this special case of our model, we set\n$$ {\\sf S}(x, y) = \\exp(-\\delta) {\\sf D}(x, y)^{-\\gamma}, $$\nwhere we now interpret ${\\sf D}(x, y)$ as the multiplicative rate of growth of the level of aggregate consumption between $t$ and $t+1$ when $X_t = x$ and $X_{t+1} = y$. \nTerm structure of interest rates\nWhen the Markov state $X_t = x$ at time $t$, the price of a risk-free zero-coupon bond paying one unit of consumption at time $t+j$\nis\n$$ p_{j,t} = E \\left[ \\frac{S_{t+j}}{S_t} \\Bigm| X_t = x \\right]. $$\nLet the matrix $\\widehat{P}$ be given by $\\widehat{P}(x, y) = P(x, y) {\\sf S}(x, y)$\nand apply the above forecasting formula to deduce\n$$\n p_{j,t} = \\left( \\sum_{y \\in S} \\widehat P^j(x, y) \\right).\n$$\nThe yield $R_{jt}$ on a $j$ period risk-free bond satisfies\n$$ p_{jt} = \\exp(-j R_{jt}) $$\nor\n$$ R_{jt} = -\\frac{\\log(p_{jt})}{j}. $$\nFor a given $t$, \n$$ \\begin{bmatrix} R_{1t} & R_{2t} & \\cdots & R_{Jt} \\end{bmatrix} $$\nis the term structure of interest rates on risk-free zero-coupon bonds.\nSimulating the Lucas asset pricing model\nWrite $y$ for the process of quarterly per capita consumption growth with mean $\\mu_C$.\nIn the following example,\nwe assume that $y - \\mu_C$ follows a discretized version of an AR(1) process\n(while independent of the Markov state),\nwhere the discrete approximation is derived by the routine\ntauchen\nfrom quantecon.markov.\nStep17: Create a LucasTreeFiniteMarkov instance\nStep18: Simulate the model\nStep19: Plotting the term structure of interest rates\nStep20: The term structure of interest rates R is a sequence (of length J)\nof vectors (of length n each).\nInstead of plotting the whole R,\nwe plot the sequences for the \"low\", \"middle\", and \"high\" states.\nHere we define those states as follows.\nThe vector $(p_{jt}|X_t = x)_{x \\in S}$, if appropriately rescaled,\nconverges as $j \\to \\infty$\nto an eigenvector of $\\widehat P$ that corresponds to the dominant eigenvalue,\nwhich equals mf.e times some constant.\nThus call the states that correspond to the smallest, largest, and middle values of mf.e\nthe high, low, and middle states.\nStep21: Another class of examples\nLet the elements of ${\\sf D}$ (i.e., the multiplicative growth rates of the dividend or consumption process) be, for example, \n$$ {\\sf D} = \\begin{bmatrix} .95 & .975 & 1 \\cr\n .975 & 1 & 1.025 \\cr\n 1 & 1.025 & 1.05 \\end{bmatrix}.$$\nHere the realized growth rate depends on both $X_t$ and $X_{t+1}$ -- i.e., the value of the state last period (i) and this period (j). \nHere we have imposed symmetry to save parameters, but of course there is no reason to do that. \nWe can combine this specification with various specifications of $P$ matrices e.g., an \"i.i.d.\" state evolution process would be represented with $P$ in which all rows are identical. Even that simple specification\ncan some interesting outcomes with the above ${\\sf D}$. \nWe'll try this little $3 \\times 3$ example with a Lucas model below.\nBut first a word of caution. \nWe have to choose values for the consumption growth rate matrix $G_C$ and\nthe transition matrix $P$ so that pertinent eigenvalues are smaller than one in modulus.\nThis check is implemented in the code.\nStep22: The peculiar sample path property revisited\nConsider again the multiplicative martingale that associated with the $5$ state Lucas model studied earlier.\nRemember that by construction, this is a likelihood ratio process.\nHere we'll simulate a number of paths and build up histograms of $\\widetilde M_t$ at various values of $t$.\nThese histograms should help us understand what is going on to generate the peculiar property mentioned above.\nAs $t \\rightarrow +\\infty$, notice that\nmore and more probability mass piles up near zero, $\\ldots$ but\na longer and longer thin right tail emerges."},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom quantecon.markov import tauchen, MarkovChain\nfrom mult_functional import MultFunctionalFiniteMarkov\nfrom asset_pricing_mult_functional import (\n AssetPricingMultFiniteMarkov, LucasTreeFiniteMarkov\n)\nExplanation: Some Multiplicative Functionals\nDaisuke Oyama, Thomas J. Sargent and John Stachurski\nEnd of explanation\n# Transition probability matrix\nP = [[0.4, 0.6],\n [0.2, 0.8]]\n# Instance of MarkovChain from quantecon.markov\nmc = MarkovChain(P)\nExplanation: Plan of the notebook\nIn other quant-econ lectures\n(\"Markov Asset Pricing\" and\n\"The Lucas Asset Pricing Model\"),\nwe have studied the celebrated Lucas asset pricing model (Lucas (1978)) that is cast in a setting in which the key objects of the theory,\nnamely, a stochastic discount factor process, an aggregate consumption process, and an\nasset payout process, are all taken to be stationary.\nIn this notebook, we shall learn about some tools that allow us to extend asset pricing models to settings in which neither the stochastic discount factor process nor the asset's payout process is a stationary process. The key tool is the class of multiplicative functionals from the stochastic process literature that Hansen and Scheinkman (2009) have adapted so that they can be applied to asset pricing and other interesting macroeconomic problems. \nIn this notebook, we confine ourselves to studying a special type of multiplicative functional, namely, multiplicative functionals driven by finite state Markov chains. We'll learn about some of their properties and applications. Among other things, we'll\nobtain Hansen and Scheinkman's more general multiplicative decomposition of our particular type of multiplicative functional into the following three primitive types of multiplicative functions:\na nonstochastic process displaying deterministic exponential growth;\na multiplicative martingale or likelihood ratio process; and\na stationary stochastic process that is the exponential of another stationary process. \nThe first two of these primitive types are nonstationary while the third is stationary. The first is nonstochastic, while the second and third are stochastic.\nAfter taking a look at the behavior of these three primitive components, we'll apply this structure to model\na stochastically, exponentially declining stochastic discount factor process;\na stochastically, exponentially growing or declining asset payout or dividend process;\nthe prices of claims to exponentially growing or declining payout processes; and\na theory of the term structure of interest rates.\nWe begin by describing a basic setting that we'll use in several applications later in this notebook.\nMultiplicative functional driven by a finite state Markov chain\nLet $S$ be the integers ${0, \\ldots, n-1}$. Because we study stochastic\nprocesses taking values in $S$, elements of $S$ will be denoted by symbols\nsuch as $x, y$ instead of $i, j$. Also, to avoid double subscripts, for a\nvector $h \\in {\\mathbb R}^n$ we will write $h(x)$ instead of $h_x$ for the value at\nindex $x$.\n(In fact $h$ can also be understood as a function $h \\colon S \\to {\\mathbb R}$. However, in expressions involving matrix algebra we always regard it as a column vector. Similarly, $S$ can be any finite set but in what follows we identify it with ${0, \\ldots, n-1}$.) \nMatrices are represented by symbols such as ${\\mathbf P}$ and ${\\mathbf Q}$. Analogous to \nthe vector case, the $(x,y)$-th element of matrix ${\\mathbf Q}$ is written\n${\\mathbf Q}(x, y)$\nrather than ${\\mathbf Q}_{x y}$. A nonnegative $n \\times n$ matrix ${\\mathbf\nQ}$ is\ncalled irreducible if, for any $(x, y) \\in S \\times S$, there exists an\ninteger $m$ such that ${\\mathbf Q}^m(x, y) > 0$. It is called primitive if there\nexists an integer $m$ such that ${\\mathbf Q}^m(x, y) > 0$ for all $(x, y) \\in S \\times\nS$.\nA positive integer $d$ is the period of $x \\in S$\nif it is the greatest common divisor of all $m$'s such that ${\\mathbf Q}^m(x, x) > 0$.\nIf ${\\mathbf Q}$ is irreducible, then all $x$'s in $S$ have the same period,\nwhich is called the period of the matrix ${\\mathbf Q}$.\n${\\mathbf Q}$ is called aperiodic if its period is one.\nA nonnegative matrix is irreducible and aperiodic if and only if it is primitive.\nLet ${\\mathbf P}$ be a stochastic $n \\times n$ matrix and let ${X_t}$ be\na Markov process with transition probabilities ${\\mathbf P}$. That is, ${X_t}$ is a Markov process on $S$ satisfying\n$$\n \\mathbb P [ X_{t+1} = y \\mid {\\mathcal F}_t] \n = {\\mathbf P}(X_t, y)\n$$\nfor all $y \\in S$. Here ${{\\mathcal F}_t}$ is the natural filtration generated by\n${X_t}$ and the equality holds almost surely on an underlying probability\nspace $(\\Omega, {\\mathcal F}, \\mathbb P)$. \nA martingale with respect to ${{\\mathcal F}t}$ is a real-valued stochastic\nprocess on $(\\Omega, {\\mathcal F}, \\mathbb P)$ satisfying $E[|M_t|] < \\infty$ and $E[M{t+1} \\mid\n{\\mathcal F}_t] = M_t$ for all $t$.\nA multiplicative functional generated by ${X_t}$ is a real-valued stochastic\nprocess ${M_t}$ satisfying $M_0 > 0$ and\n$$\n \\frac{M_{t+1}}{M_t} = {\\mathbf M}(X_t, X_{t+1})\n$$\nfor some strictly positive $n \\times n$ matrix ${\\mathbf M}$.\nIf, in addition, \n$$\n E[ {\\mathbf M}(X_t, X_{t+1}) \\mid {\\mathcal F}_t] = 1,\n$$\nthen ${M_t}$ is clearly a martingale. Given its construction as a product of factors ${\\mathbf M}(X_t, X_{t+1})$, it is sometimes called a\nmultiplicative martingale.\nIf we write\n$$\n \\ln M_{t+1} - \\ln M_t = {\\mathbf G}(X_t, X_{t+1})\n$$\nwhere\n$$\n {\\mathbf G}(X_t, X_{t+1}) := \\ln {\\mathbf M}(X_t, X_{t+1}),\n$$\nthen ${\\mathbf G}(x, y)$ can be interpreted as the growth rate of ${M_t}$ at state pair $(x, y)$.\nA likelihood ratio process is a multiplicative martingale ${M_t}$ with initial condition $M_0 = 1$. From this initial condition and the martingale property it is easy to show that\n$$\n E[M_t] = E[M_t \\mid {\\mathcal F}_0] = 1\n$$\nfor all $t$.\nMartingale decomposition\nLet ${\\mathbf P}$ be a stochastic matrix, let ${\\mathbf M}$ be a positive $n\n\\times n$ matrix and let ${M_t}$ be the multiplicative functional defined\nabove. Assume that ${\\mathbf P}$ is irreducible. Let $\\widetilde {\\mathbf P}$ be defined by\n$$\n \\widetilde {\\mathbf P} (x, y) = {\\mathbf M}(x, y) {\\mathbf P}(x, y)\n \\qquad ((x, y) \\in S \\times S).\n$$\nUsing the assumptions that ${\\mathbf P}$ is irreducible and ${\\mathbf M}$ is positive, it can be\nshown that $\\widetilde {\\mathbf P}$ is also irreducible.\nBy the Perron-Frobenius theorem, there exists for $\\widetilde {\\mathbf P}$ a unique eigenpair $(\\lambda,\ne) \\in {\\mathbb R} \\times {\\mathbb R}^n$ such that $\\lambda$ and all elements of $e$ are strictly positive. Letting $\\eta := \\log \\lambda$, we have\n$$\n \\widetilde {\\mathbf P} e = \\exp(\\eta) e.\n$$\nNow define $n \\times n$ matrix $\\widetilde {\\mathbf M}$ by\n$$\n \\widetilde {\\mathbf M}(x, y) := \\exp(- \\eta) {\\mathbf M}(x, y) \\frac{e(y)}{e(x)}.\n$$\nNote that $\\widetilde {\\mathbf M}$ is also strictly positive. By construction, for each $x \\in\nS$ we have\n\\begin{align}\n \\sum_{y \\in S} \\widetilde {\\mathbf M}(x, y) {\\mathbf P}(x, y)\n & = \\sum_{y \\in S} \\exp(- \\eta) {\\mathbf M}(x, y) \\frac{e(y)}{e(x)} {\\mathbf P}(x, y)\n \\\n & = \\exp(- \\eta) \\frac{1}{e(x)} \\sum_{y \\in S} \\widetilde {\\mathbf P}(x, y) e(y) \n \\\n & = \\exp(- \\eta) \\frac{1}{e(x)} \\widetilde {\\mathbf P} e(x) = 1.\n\\end{align}\nNow let ${\\widetilde M_t}$ be the multiplicative functional defined by\n$$\n \\frac{\\widetilde M_{t+1}}{\\widetilde M_t} = \\widetilde {\\mathbf M}(X_t, X_{t+1})\n \\quad \\text{and} \\quad\n \\widetilde M_0 = 1.\n$$\nIn view of our proceeding calculations, we have\n$$\n E \n \\left[\n \\frac{\\widetilde M_{t+1}}{\\widetilde M_t} \n \\Bigm| {\\mathcal F}t\n \\right]\n = E[ \\widetilde {\\mathbf M}(X_t, X{t+1}) \\mid {\\mathcal F}t]\n = \\sum{y \\in S} \\widetilde {\\mathbf M}(X_t, y) {\\mathbf P}(X_t, y) = 1.\n$$\nHence ${\\widetilde M_t}$ is a likelihood ratio process.\nBy reversing the construction of $\\widetilde {\\mathbf M}$ given above, we can write \n$$\n {\\mathbf M}(x, y) = \\exp( \\eta) \\widetilde {\\mathbf M}(x, y) \\frac{e(x)}{e(y)}\n$$\nand hence\n$$\n \\frac{M_{t+1}}{M_t} \n = \n \\exp( \\eta)\n \\frac{e(X_t)}{e(X_{t+1})}\n \\frac{\\widetilde M_{t+1}}{\\widetilde M_t} .\n$$\nIn this equation we have decomposed the original multiplicative functional\ninto the product of\na nonstochastic component $\\exp( \\eta)$,\na stationary sequence $e(X_t)/e(X_{t+1})$, and\nthe factors $\\widetilde M_{t+1}/\\widetilde M_t$ of a likelihood ratio process.\nSimulation strategy\nLet $x_t$ be the index of the Markov state at time $t$ and let ${ x_0, x_1, \\ldots, x_T}$ be a simulation of the Markov process for ${X_t}$. \nWe can use the formulas above easily to generate simulations of the multiplicative functional $M_t$ and of the positive multiplicative martingale $\\widetilde M_t$.\nForecasting formulas\nLet ${M_t}$ be the multiplicative functional described above with transition\nmatrix ${\\mathbf P}$ and matrix ${\\mathbf M}$ defining the multiplicative increments. We can\nuse $\\widetilde {\\mathbf P}$ to forecast future observations of ${M_t}$. In particular,\nwe have the relation\n$$\n E[ M_{t+j} \\mid X_t = x]\n = M_t \\sum_{y \\in S} \\widetilde {\\mathbf P}^j(x, y) \n \\qquad (x \\in S).\n$$\nThis follows from the definition of ${M_t}$, which allows us to write\n$$\n M_{t+j} = M_t {\\mathbf M}(X_t, X_{t+1}) \\cdots {\\mathbf M}(X_{t+j-1}, X_{t+j}).\n$$\nTaking expectations and conditioning on $X_t = x$ gives\n\\begin{align}\n E[ M_{t+j} \\mid X_t = x]\n & = \\sum_{(x_1, \\ldots, x_j)} \n M_t {\\mathbf M}(x, x_1) \\cdots {\\mathbf M}(x_{j-1}, x_j)\n {\\mathbf P}(x, x_1) \\cdots {\\mathbf P}(x_{j-1}, x_j)\n \\\n & = \\sum_{(x_1, \\ldots, x_j)} \n M_t \\widetilde {\\mathbf P}(x, x_1) \\cdots \\widetilde {\\mathbf P}(x_{j-1}, x_j)\n \\\n & = M_t \\sum_{y \\in S} \\widetilde {\\mathbf P}^j(x, y) .\n\\end{align}\nImplementation\nThe MultFunctionalFiniteMarkov class\nimplements multiplicative functionals driven by finite state Markov chains.\nHere we briefly demonstrate how to use it.\nEnd of explanation\nmc.is_irreducible\n# Growth rate matrix\nG = [[-1, 0],\n [0.5, 1]]\nExplanation: Clearly, this Markov chain is irreducible:\nEnd of explanation\nmf = MultFunctionalFiniteMarkov(mc, G, M_inits=100)\nmf.M_matrix\nExplanation: Create a MultFunctionalFiniteMarkov instance:\nEnd of explanation\nmf.exp_eta\nExplanation: The dominant eigenvalue, denoted $\\exp(\\eta)$ above, of $\\widetilde P$ is\nEnd of explanation\nmf.eta\nExplanation: The value $\\eta$ is\nEnd of explanation\nmf.e\nExplanation: The (normalized) dominant eigenvector $e$ of $\\widetilde P$ is\nEnd of explanation\nts_length = 10\nres = mf.simulate(ts_length)\nExplanation: Let us simulate our MultFunctionalFiniteMarkov:\nEnd of explanation\nexp_eta_geo_series = np.empty_like(res.M)\nexp_eta_geo_series[0] = 1\nexp_eta_geo_series[1:] = mf.exp_eta\nnp.cumprod(exp_eta_geo_series, out=exp_eta_geo_series)\nM_2 = res.M[0] * res.M_tilde * mf.e[res.X[0]] * exp_eta_geo_series / mf.e[res.X]\nM_2\nM_2 - res.M\nExplanation: The simulation results are contained in res.\nLet's check that M and M_tilde satisfy the identity from their definition (up to numerical errors).\nEnd of explanation\nts_length = 120\nnum_reps = 100\nres = mf.simulate(ts_length, num_reps=num_reps)\nylim = (0, 50)\nfig, ax = plt.subplots(figsize=(8,5))\nfor i in range(num_reps):\n ax.plot(res.M_tilde[i], color='k', alpha=0.5)\n ax.set_xlim(0, ts_length)\n ax.set_ylim(*ylim)\nax.set_title(r'{0} sample paths of $\\widetilde M_t$'.format(num_reps))\nplt.show()\nExplanation: Likelihood ratio processes\nA likelihood ratio process is a multiplicative martingale with mean $1$. \nA multiplicative martingale process ${\\widetilde M_t }_{t=0}^\\infty$ that starts from $\\widetilde M_0 = 1$ is a likelihood ratio process.\nEvidently, a likelihood ratio process satisfies\n$$ E [\\widetilde M_t \\mid {\\mathfrak F}_0] = 1 .$$\nHansen and Sargent (2017) point out that likelihood ratio processes have the following peculiar property:\nAlthough $E{\\widetilde M}{j} = 1$ for each $j$, ${{\\widetilde M}{j} : j=1,2,... }$ converges almost surely to zero.\nThe following graph, and also one at the end of this notebook, illustrate the peculiar property by reporting simulations of many sample paths of a ${\\widetilde M_t }_{t=0}^\\infty$ process.\nEnd of explanation\n# Transition probability matrix\nP = [[0.4, 0.6],\n [0.2, 0.8]]\n# Instance of MarkovChain from quantecon.markov\nmc = MarkovChain(P)\n# Stochastic discount rate matrix\nG_S = [[-0.02, -0.03],\n [-0.01, -0.04]]\n# Dividend growth rate matrix\nG_d = [[0.01, 0.02],\n [0.005, 0.02]]\n# AssetPricingMultFiniteMarkov instance\nap = AssetPricingMultFiniteMarkov(mc, G_S, G_d)\nExplanation: We revisit the peculiar sample path property at the end of this notebook.\nStochastic discount factor and exponentially changing asset payouts\nDefine a matrix ${\\sf S}$ whose $(x, y)$th element is ${\\sf S}(x,y) = \\exp(G_S(x,y))$, where $G_S(x,y)$ is a stochastic discount rate\nfor moving from state $x$ at time $t$ to state $y$ at time $t+1$.\nA stochastic discount factor process ${S_t}_{t=0}^\\infty$ is governed by the multiplicative functional:\n$$\n{\\frac {S_{t+1}}{S_t}} = \\exp[ G_S(X_t, X_{t+1} ) ] = {\\sf S}(X_t, X_{t+1}).\n$$\nDefine a matrix ${\\sf D}$ whose $(x,y)$th element is ${\\sf D}(x,y) = \\exp(G_d(x,y))$.\nA non-negative payout or dividend process ${d_t}_{t=0}^\\infty$ is governed by the multiplicative functional:\n$$\n{\\frac {d_{t+1}}{d_t}} = \\exp\\left[ G_d(X_t,X_{t+1}) \\right] = {\\sf D}(X_t, X_{t+1}).\n$$\nLet $p_t$ be the price at the beginning of period $t$ of a claim to the stochastically growing or shrinking stream of payouts\n${d_{t+j}}_{j=0}^\\infty$.\nIt satisfies\n$$\np_t = E\\left[\\frac{S_{t+1}}{S_t} (d_t + p_{t+1}) \\Bigm| {\\mathfrak F}_t\\right] ,\n$$\nor\n$$\n\\frac{p_t}{d_t} =\nE\\left[\\frac{S_{t+1}}{S_t}\n \\left(1 + \\frac{d_{t+1}}{d_t} \\frac{p_{t+1}}{d_{t+1}}\\right)\n \\Bigm| {\\mathfrak F}_t\\right] ,\n$$\nwhere the time $t$ information set ${\\mathfrak F}_t$ includes $X_t, S_t, d_t$.\nGuessing that the price-dividend ratio $\\frac{p_t}{d_t}$ is a function of the Markov state $X_t$ only, and letting\nit equal $v(x)$ when $X_t = x$, write the preceding equation as\n$$ v(x) = \\sum_{x \\in S} P(x,y) \\left[ {\\sf S}(x,y) \\mathbf{1} + {\\sf S}(x,y) {\\sf D}(x,y) v(y) \\right] $$\nor\n$$\nv = c + \\widetilde{P} v ,\n$$\nwhere $c = \\widehat{P} \\mathbf{1}$ is by construction a nonnegative vector and we have defined\nthe nonnegative matrices $\\widetilde{P} \\in \\mathbb{R}^{n \\times n}$ and\n$\\widehat{P} \\in \\mathbb{R}^{n \\times n}$ by\n$$\n\\begin{aligned}\n\\widetilde{P}(x,y) &= P(x,y) {\\sf S}(x,y) {\\sf D}(x,y), \\\n\\widehat{P}(x,y) &= P(x,y) {\\sf S}(x,y).\n\\end{aligned}\n$$\nThe equation $v = \\widetilde{P} v + c$ has a nonnegative solution\nfor any nonnegative vector $c$ if and only if\nall the eigenvalues of $\\widetilde{P}$ are smaller than $1$ in modulus.\nA sufficient condition for existence of a nonnegative solution is that all the column sums, or all the row sums, of $\\widetilde{P}$ are less than one, which holds when $G_S + G_d \\ll 0$. This condition describes a sense in which discounting counteracts growth in dividends.\nGiven a solution $v$, the price-dividend ratio is a stationary process that is a fixed function of the Markov state:\n$$\n\\frac{p_t}{d_t} = v(x) \\text{ when $X_t = x$}.\n$$\nMeanwhile, both the asset price process and the dividend process are multiplicative functionals that experience either multiplicative growth or decay.\nImplementation\nThe AssetPricingMultFiniteMarkov class\nimplements the asset pricing model with the specification of the stochastic discount factor process described above.\nBelow is an example of how to use the class. \nPlease note that the stochastic discount rate matrix $G_S$ and the payout growth rate matrix $G_d$ are specified independently.\nIn the Lucas asset pricing model to be described below, the matrix $G_S$ is a function\nof the payoff growth rate matrix $G_d$ and another parameter $\\gamma$ that is a coefficient of relative risk aversion in the utility function of a representative consumer,\nas well as the discount rate $\\delta$.\nEnd of explanation\nap.mf_S.M_tilde_matrix\nap.mf_d.M_tilde_matrix\nExplanation: (1) Display the $\\widetilde M$ matrices for $S_t$ and $d_t$.\nEnd of explanation\nts_length = 250\nres = ap.simulate(ts_length)\npaths = [res.S, res.d]\nlabels = [r'$S_t$', r'$d_t$']\ntitles = ['Sample path of ' + label for label in labels]\nloc = 4\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nExplanation: (2) Plot sample paths of $S_t$ and $d_t$.\nEnd of explanation\nprint(\"price-dividend ratio in different Markov states = {0}\".format(ap.v))\nExplanation: (2) Print $v$.\nEnd of explanation\npaths = [res.p, res.d]\nlabels = [r'$p_t$', r'$d_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nExplanation: (3) Plot sample paths of $p_t$ and $d_t$.\nEnd of explanation\n# Change G_s[0, 1] from -0.03 to -1\nG_S_2 = [[-0.02, -1],\n [-0.01, -0.04]]\nap_2 = AssetPricingMultFiniteMarkov(mc, G_S_2, G_d)\nap_2.v\nres_2 = ap_2.simulate(ts_length)\npaths = [res_2.p, res_2.d]\nlabels = [r'$p_t$', r'$d_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nExplanation: (5) Experiment with a different $G_S$ matrix.\nEnd of explanation\nmu_C = .005 # mean of quarterly per capita consumption growth\nsigma_C = .005 # standard deviation of quarterly per capita consumption growth\nrho = .25 # persistence of per capita quarterly consumption growth\n# standard deviation of the underlying noise distribution\nsigma = sigma_C * np.sqrt(1 - rho**2)\nm = 2 # number of standard deviations you would like the gridded vector y to cover\nn = 5 # number of points in the discretization\ny, P = tauchen(rho, sigma, m, n)\nmc = MarkovChain(P)\ny += mu_C # consumption growth vector\n# Consumption growth matrix\nG_C = np.empty((n, n))\nG_C[:] = y\n# Discount rate\ndelta = .01\n# Coefficient of relative risk aversion\ngamma = 20\nExplanation: Lucas asset pricing model with growth\nAs an example of our model of a stochastic discount factor and payout process, we'll adapt a version of the famous Lucas (1978) asset pricing model to have an exponentially growing aggregate consumption endowment.\nWe'll use CRRA utility\n$u(c) = c^{1-\\gamma}/(1-\\gamma)$,\na common specification in applications of the Lucas model.\nSo now we let $d_t = C_t$, aggregate consumption, and we let \n$$\\frac{S_{t+1}}{S_t} = \\exp(-\\delta) \\left(\\frac{C_{t+1}}{C_t} \\right)^{-\\gamma} ,$$\nwhere $\\delta > 0$ is a rate of time preference and $\\gamma > 0$ is a coefficient of relative risk aversion. \nTo obtain this special case of our model, we set\n$$ {\\sf S}(x, y) = \\exp(-\\delta) {\\sf D}(x, y)^{-\\gamma}, $$\nwhere we now interpret ${\\sf D}(x, y)$ as the multiplicative rate of growth of the level of aggregate consumption between $t$ and $t+1$ when $X_t = x$ and $X_{t+1} = y$. \nTerm structure of interest rates\nWhen the Markov state $X_t = x$ at time $t$, the price of a risk-free zero-coupon bond paying one unit of consumption at time $t+j$\nis\n$$ p_{j,t} = E \\left[ \\frac{S_{t+j}}{S_t} \\Bigm| X_t = x \\right]. $$\nLet the matrix $\\widehat{P}$ be given by $\\widehat{P}(x, y) = P(x, y) {\\sf S}(x, y)$\nand apply the above forecasting formula to deduce\n$$\n p_{j,t} = \\left( \\sum_{y \\in S} \\widehat P^j(x, y) \\right).\n$$\nThe yield $R_{jt}$ on a $j$ period risk-free bond satisfies\n$$ p_{jt} = \\exp(-j R_{jt}) $$\nor\n$$ R_{jt} = -\\frac{\\log(p_{jt})}{j}. $$\nFor a given $t$, \n$$ \\begin{bmatrix} R_{1t} & R_{2t} & \\cdots & R_{Jt} \\end{bmatrix} $$\nis the term structure of interest rates on risk-free zero-coupon bonds.\nSimulating the Lucas asset pricing model\nWrite $y$ for the process of quarterly per capita consumption growth with mean $\\mu_C$.\nIn the following example,\nwe assume that $y - \\mu_C$ follows a discretized version of an AR(1) process\n(while independent of the Markov state),\nwhere the discrete approximation is derived by the routine\ntauchen\nfrom quantecon.markov.\nEnd of explanation\nlt = LucasTreeFiniteMarkov(mc, G_C, gamma, delta)\n# Consumption growth rates\nlt.G_C[0]\n# Stochastic discount rates\nlt.G_S[0]\nExplanation: Create a LucasTreeFiniteMarkov instance:\nEnd of explanation\nts_length = 250\nres = lt.simulate(ts_length)\npaths = [res.S, res.d]\nlabels = [r'$S_t$', r'$C_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nprint(\"price-dividend ratio in different states = \")\nlt.v\npaths = [res.p, res.d]\nlabels = [r'$p_t$', r'$C_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nExplanation: Simulate the model:\nEnd of explanation\nG_S = lt.G_S\n# SDF process as a MultiplicativeFunctional\nmf = MultFunctionalFiniteMarkov(mc, G_S)\nP_hat = mf.P_tilde\nJ = 20\n# Sequence of price vectors\np = np.empty((J, n))\np[0] = P_hat.dot(np.ones(n))\nfor j in range(J-1):\n p[j+1] = P_hat.dot(p[j])\n# Term structure\nR = -np.log(p)\nR /= np.arange(1, J+1)[:, np.newaxis]\nR\nExplanation: Plotting the term structure of interest rates\nEnd of explanation\nmf.e\nhi = np.argsort(mf.e)[0]\nlo = np.argsort(mf.e)[-1]\nmid = np.argsort(mf.e)[mf.n//2]\nstates = [hi, mid, lo]\nlabels = [s + ' state' for s in ['high', 'middle', 'low']]\nfig, ax = plt.subplots(figsize=(8,5))\nfor i, label in zip(states, labels):\n ax.plot(np.arange(1, J+1), R[:, i], label=label)\nax.set_xlim((1, J))\nax.legend()\nplt.show()\nExplanation: The term structure of interest rates R is a sequence (of length J)\nof vectors (of length n each).\nInstead of plotting the whole R,\nwe plot the sequences for the \"low\", \"middle\", and \"high\" states.\nHere we define those states as follows.\nThe vector $(p_{jt}|X_t = x)_{x \\in S}$, if appropriately rescaled,\nconverges as $j \\to \\infty$\nto an eigenvector of $\\widehat P$ that corresponds to the dominant eigenvalue,\nwhich equals mf.e times some constant.\nThus call the states that correspond to the smallest, largest, and middle values of mf.e\nthe high, low, and middle states.\nEnd of explanation\n# Growth rate matrix\nG_C = np.log([[.95 , .975, 1],\n [.975, 1 , 1.025],\n [1, 1.025, 1.05]])\n# MarkovChain instance\nP = [[0.1, 0.6, 0.3],\n [0.1, 0.5, 0.4],\n [0.1, 0.6, 0.3]]\nmc = MarkovChain(P)\n# Discount rate\ndelta = .01\n# Coefficient of relative risk aversion\ngamma = 20\nlt = LucasTreeFiniteMarkov(mc, G_C, gamma, delta)\n# Price-dividend ratios\nlt.v\nts_length = 250\nres = lt.simulate(ts_length)\npaths = [res.S, res.d]\nlabels = [r'$S_t$', r'$C_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\npaths = [res.p, res.d]\nlabels = [r'$p_t$', r'$C_t$']\ntitles = ['Sample path of ' + label for label in labels]\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\nfor ax, path, label, title in zip(axes, paths, labels, titles):\n ax.plot(path, label=label)\n ax.set_title(title)\n ax.legend(loc=loc)\nplt.show()\nExplanation: Another class of examples\nLet the elements of ${\\sf D}$ (i.e., the multiplicative growth rates of the dividend or consumption process) be, for example, \n$$ {\\sf D} = \\begin{bmatrix} .95 & .975 & 1 \\cr\n .975 & 1 & 1.025 \\cr\n 1 & 1.025 & 1.05 \\end{bmatrix}.$$\nHere the realized growth rate depends on both $X_t$ and $X_{t+1}$ -- i.e., the value of the state last period (i) and this period (j). \nHere we have imposed symmetry to save parameters, but of course there is no reason to do that. \nWe can combine this specification with various specifications of $P$ matrices e.g., an \"i.i.d.\" state evolution process would be represented with $P$ in which all rows are identical. Even that simple specification\ncan some interesting outcomes with the above ${\\sf D}$. \nWe'll try this little $3 \\times 3$ example with a Lucas model below.\nBut first a word of caution. \nWe have to choose values for the consumption growth rate matrix $G_C$ and\nthe transition matrix $P$ so that pertinent eigenvalues are smaller than one in modulus.\nThis check is implemented in the code.\nEnd of explanation\nmf.P\nT = 200\nnum_reps = 10**5\nres = mf.simulate(T+1, num_reps=num_reps)\nbins = np.linspace(0, 5, num=21)\nbins_mid = (bins[:-1] + bins[1:]) / 2\nnums_row_col = (3, 2)\nxlim = (bins[0], bins[-1])\nylim = (0, 0.6)\nwidth = (bins[0] + bins[-1]) / (len(bins)-1)\nts = [5, 10, 20, 50, 100, 200]\nfig, axes = plt.subplots(*nums_row_col, figsize=(12,10))\nfor i, ax_idx in enumerate(itertools.product(*(range(n) for n in nums_row_col))):\n mean = res.M_tilde[:, ts[i]].mean()\n hist, _ = np.histogram(res.M_tilde[:, ts[i]], bins=bins)\n axes[ax_idx].bar(bins_mid, hist/num_reps, width, align='center')\n axes[ax_idx].vlines(mean, ax.get_ylim()[0], ax.get_ylim()[1], \"k\", \":\")\n axes[ax_idx].set_xlim(*xlim)\n axes[ax_idx].set_ylim(*ylim)\n axes[ax_idx].set_title(r'$t = {}$'.format(ts[i]))\nplt.show()\nExplanation: The peculiar sample path property revisited\nConsider again the multiplicative martingale that associated with the $5$ state Lucas model studied earlier.\nRemember that by construction, this is a likelihood ratio process.\nHere we'll simulate a number of paths and build up histograms of $\\widetilde M_t$ at various values of $t$.\nThese histograms should help us understand what is going on to generate the peculiar property mentioned above.\nAs $t \\rightarrow +\\infty$, notice that\nmore and more probability mass piles up near zero, $\\ldots$ but\na longer and longer thin right tail emerges.\nEnd of explanation"}}},{"rowIdx":2161,"cells":{"Unnamed: 0":{"kind":"number","value":2161,"string":"2,161"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n \nRecommender Systems with Python\nWelcome to the code notebook for Recommender Systems with Python. In this lecture we will develop basic recommendation systems using Python and pandas. There is another notebook: Advanced Recommender Systems with Python. That notebook goes into more detail with the same data set.\nIn this notebook, we will focus on providing a basic recommendation system by suggesting items that are most similar to a particular item, in this case, movies. Keep in mind, this is not a true robust recommendation system, to describe it more accurately,it just tells you what movies/items are most similar to your movie choice.\nThere is no project for this topic, instead you have the option to work through the advanced lecture version of this notebook (totally optional!).\nLet's get started!\nImport Libraries\nEnd of explanation\ncolumn_names = ['user_id', 'item_id', 'rating', 'timestamp']\ndf = pd.read_csv('u.data', sep='\\t', names=column_names)\ndf.head()\nExplanation: Get the Data\nEnd of explanation\nmovie_titles = pd.read_csv(\"Movie_Id_Titles\")\nmovie_titles.head()\nExplanation: Now let's get the movie titles:\nEnd of explanation\ndf = pd.merge(df,movie_titles,on='item_id')\ndf.head()\nExplanation: We can merge them together:\nEnd of explanation\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('white')\n%matplotlib inline\nExplanation: EDA\nLet's explore the data a bit and get a look at some of the best rated movies.\nVisualization Imports\nEnd of explanation\ndf.groupby('title')['rating'].mean().sort_values(ascending=False).head()\ndf.groupby('title')['rating'].count().sort_values(ascending=False).head()\nratings = pd.DataFrame(df.groupby('title')['rating'].mean())\nratings.head()\nExplanation: Let's create a ratings dataframe with average rating and number of ratings:\nEnd of explanation\nratings['num of ratings'] = pd.DataFrame(df.groupby('title')['rating'].count())\nratings.head()\nExplanation: Now set the number of ratings column:\nEnd of explanation\nplt.figure(figsize=(10,4))\nratings['num of ratings'].hist(bins=70)\nplt.figure(figsize=(10,4))\nratings['rating'].hist(bins=70)\nsns.jointplot(x='rating',y='num of ratings',data=ratings,alpha=0.5)\nExplanation: Now a few histograms:\nEnd of explanation\nmoviemat = df.pivot_table(index='user_id',columns='title',values='rating')\nmoviemat.head()\nExplanation: Okay! Now that we have a general idea of what the data looks like, let's move on to creating a simple recommendation system:\nRecommending Similar Movies\nNow let's create a matrix that has the user ids on one access and the movie title on another axis. Each cell will then consist of the rating the user gave to that movie. Note there will be a lot of NaN values, because most people have not seen most of the movies.\nEnd of explanation\nratings.sort_values('num of ratings',ascending=False).head(10)\nExplanation: Most rated movie:\nEnd of explanation\nratings.head()\nExplanation: Let's choose two movies: starwars, a sci-fi movie. And Liar Liar, a comedy.\nEnd of explanation\nstarwars_user_ratings = moviemat['Star Wars (1977)']\nliarliar_user_ratings = moviemat['Liar Liar (1997)']\nstarwars_user_ratings.head()\nExplanation: Now let's grab the user ratings for those two movies:\nEnd of explanation\nsimilar_to_starwars = moviemat.corrwith(starwars_user_ratings)\nsimilar_to_liarliar = moviemat.corrwith(liarliar_user_ratings)\nExplanation: We can then use corrwith() method to get correlations between two pandas series:\nEnd of explanation\ncorr_starwars = pd.DataFrame(similar_to_starwars,columns=['Correlation'])\ncorr_starwars.dropna(inplace=True)\ncorr_starwars.head()\nExplanation: Let's clean this by removing NaN values and using a DataFrame instead of a series:\nEnd of explanation\ncorr_starwars.sort_values('Correlation',ascending=False).head(10)\nExplanation: Now if we sort the dataframe by correlation, we should get the most similar movies, however note that we get some results that don't really make sense. This is because there are a lot of movies only watched once by users who also watched star wars (it was the most popular movie).\nEnd of explanation\ncorr_starwars = corr_starwars.join(ratings['num of ratings'])\ncorr_starwars.head()\nExplanation: Let's fix this by filtering out movies that have less than 100 reviews (this value was chosen based off the histogram from earlier).\nEnd of explanation\ncorr_starwars[corr_starwars['num of ratings']>100].sort_values('Correlation',ascending=False).head()\nExplanation: Now sort the values and notice how the titles make a lot more sense:\nEnd of explanation\ncorr_liarliar = pd.DataFrame(similar_to_liarliar,columns=['Correlation'])\ncorr_liarliar.dropna(inplace=True)\ncorr_liarliar = corr_liarliar.join(ratings['num of ratings'])\ncorr_liarliar[corr_liarliar['num of ratings']>100].sort_values('Correlation',ascending=False).head()\nExplanation: Now the same for the comedy Liar Liar:\nEnd of explanation"}}},{"rowIdx":2162,"cells":{"Unnamed: 0":{"kind":"number","value":2162,"string":"2,162"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Interactive Geovisualization of Multimodal Freight Transport Network Criticality\nBramka Arga Jafino\nDelft University of Technology\nFaculty of Technology, Policy and Management\nAn introduction note\nThis notebook provides an interactive tool to geospatially visualize the results of Bangladesh's multimodal freight transport network criticality. The interactivity doesn't work if you open this notebook from the Github page. Instead, in order to run the interactivity, you have to fork this notebook (as well as all the corresponding libraries and files used in this notebook) to your local computer, then run the jupyter notebook on it.\n1. Import all required module and files\nStep1: 2. Interactive visualization\nStep2: There are three elements that can be adjusted in this interactive visualization"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpld3 import plugins, utils\nimport geopandas as gp\nimport pandas as pd\nfrom shapely.wkt import loads\nimport os\nimport sys\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n#Modules developed for this project\nfrom transport_network_modeling import network_visualization as net_v\nfrom __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual, IntSlider\nimport ipywidgets as widgets\n#import criticality results\nresult_df_loc = r'./criticality_results/result_interdiction_1107noz2_v03.csv'\nresult_df = pd.read_csv(result_df_loc)\n#import district shapefile for background\ndistrict_gdf_loc = r'./model_input_data/BGD_adm1.shp'\ndistrict_gdf = gp.read_file(district_gdf_loc)\n#alter the 'geometry' string of the dataframe into geometry object\nresult_df['geometry'] = result_df['geometry'].apply(loads)\n#create geodataframe from criticality results dataframe\ncrs = {'init': 'epsg:4326'}\nresult_gdf = gp.GeoDataFrame(result_df, crs=crs, geometry=result_df['geometry'])\n#record all metrics in a list\nall_metric = ['m1_01', 'm1_02', 'm2_01', 'm2_02', 'm3_01', 'm3_02', 'm4_01', 'm4_02', 'm5_01', 'm6_01',\n 'm7_01', 'm7_02', 'm7_03', 'm8_01', 'm8_02', 'm8_03', 'm9_01', 'm10']\n#create ranking columns for each metric\nfor metric in all_metric:\n result_gdf[metric + '_rank'] = result_gdf[metric].rank(ascending=False)\nExplanation: Interactive Geovisualization of Multimodal Freight Transport Network Criticality\nBramka Arga Jafino\nDelft University of Technology\nFaculty of Technology, Policy and Management\nAn introduction note\nThis notebook provides an interactive tool to geospatially visualize the results of Bangladesh's multimodal freight transport network criticality. The interactivity doesn't work if you open this notebook from the Github page. Instead, in order to run the interactivity, you have to fork this notebook (as well as all the corresponding libraries and files used in this notebook) to your local computer, then run the jupyter notebook on it.\n1. Import all required module and files\nEnd of explanation\n#create special colormap for the visualization\ncmap = plt.get_cmap('YlOrRd')\nnew_cmap1 = net_v.truncate_colormap(cmap, 0.3, 1)\ncmap = plt.get_cmap('Blues')\nnew_cmap2 = net_v.truncate_colormap(cmap, 0.3, 1)\nExplanation: 2. Interactive visualization\nEnd of explanation\nwidgets.interact_manual(net_v.plot_interactive, rank=widgets.IntSlider(min=50, max=500, step=10, value=50),\n metric=widgets.Dropdown(options=all_metric, value='m1_01'),\n show_division=widgets.Checkbox(value=False), result_gdf=fixed(result_gdf),\n cmaps=fixed([new_cmap1, new_cmap2]), district_gdf=fixed(district_gdf));\nExplanation: There are three elements that can be adjusted in this interactive visualization: \n1. First, you can select the metric that results want to be displayed from the 'metric' dropdown list. \n2. Second, you can select the top n links with highest criticality score to be highlighted by adjusting the 'rank' slider. \n3. Third, you can select whether you want to also display Bangladesh's administrative boundary by turning on the 'show_division' toggle button.\nThe red links represent Bangladesh's road network while the blue links represent Bangladesh's waterway network.\nEnd of explanation"}}},{"rowIdx":2163,"cells":{"Unnamed: 0":{"kind":"number","value":2163,"string":"2,163"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Sufficient statistics for online linear regression\nFirst, I need to recreate the data generating function from here in Python. See the code for plot_xy, plot_abline, and SimpleOnlineLinearRegressor on GitHub.\nStep1: Data is not really linear, but let's just do what the exercise tells us to do. Thus, our model is \n\\begin{equation}\ny_i \\sim \\mathcal{N}(w_0 + w_1x_i, \\sigma^2),\n\\end{equation}\nor written in vector notation,\n\\begin{equation}\n\\mathbf{y} \\sim \\mathcal{N}\\left(w_0\\mathbf{1} + w_1\\mathbf{x}, \\sigma^2I\\right).\n\\end{equation}\nThus, we have that\n\\begin{align}\np(\\mathbf{y} \\mid w_0,w_1,\\sigma^2,\\mathbf{x}) &= \\prod_{i=1}^N\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\exp\\left(-\\frac{1}{2\\sigma^2}\\left(y_i - w_0 - w_1x_i\\right)^2\\right) \\\nl(w_0,w_1,\\sigma^2) = \\log p(\\mathbf{y} \\mid w_0,w_1,\\sigma^2,\\mathbf{x}) &= -\\frac{N}{2}\\log(2\\pi) - \\frac{N}{2}\\log(\\sigma^2) - \\frac{1}{2\\sigma^2}\\sum_{i=1}^N\\left(y_i - w_0 - w_1x_i\\right)^2.\n\\end{align}\nLet us try to maximize the log-likelihood. We first solve for $w_0$.\n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_0} = \\frac{1}{\\sigma^2}\\sum_{i=1}^N \\left(y_i - w_0 - w_1x_i\\right)\n= \\frac{1}{\\sigma^2}\\left(-Nw_0 + \\sum_{i=1}^N y_i - w_1 \\sum_{i=1}^Nx_i\\right).\n\\end{align}\nSetting $\\frac{\\partial{l}}{\\partial w_0} = 0$ and solving for $w_0$, we find that\n\\begin{equation}\n\\hat{w}0 = \\frac{\\sum{i=1}^N y_i}{N} - \\hat{w}1\\frac{\\sum{i=1}^N x_i}{N} = \\bar{y} - \\hat{w}_1\\bar{x}\n\\end{equation}\nNext, we solve for $w_1$. Taking the derivative, we have \n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_1} = \\frac{1}{\\sigma^2}\\sum_{i=1}^N x_i\\left(y_i - w_0 - w_1x_i\\right)\n= \\frac{1}{\\sigma^2}\\sum_{i=1}^N\\left(x_iy_i - w_0x_i - w_1x_i^2\\right).\n\\end{align}\nSetting $frac{\\partial{l}}{\\partial w_1} = 0$ and substituting $\\hat{w}_0$ for $w_0$, we have that\n\\begin{align}\n0 &= \\frac{1}{\\sigma^2}\\sum_{i=1}^N\\left(x_iy_i - (\\bar{y} - \\hat{w}1\\bar{x})x_i - \\hat{w}_1x_i^2\\right) \\\n&= \\sum{i=1}^N\\left(x_iy_i - x_i\\bar{y}\\right) -\\hat{w}1\\sum{i=1}^N\\left(x_i^2 - x_i\\bar{x}\\right).\n\\end{align}\nSince $\\sum_{i=1}^N x_i = N\\bar{x}$, we have that\n\\begin{align}\n0 &= \\sum_{i=1}^N\\left(x_iy_i - \\bar{x}\\bar{y}\\right) -\\hat{w}1\\sum{i=1}^N\\left(x_i^2 - \\bar{x}^2\\right) \\\n\\hat{w}1 &= \\frac{\\sum{i=1}^N\\left(x_iy_i - \\bar{x}\\bar{y}\\right)}{\\sum_{i=1}^N\\left(x_i^2 - \\bar{x}^2\\right)} \n= \\frac{\\sum_{i=1}^N\\left(x_iy_i - x_i\\bar{y} -\\bar{x}y_i + \\bar{x}\\bar{y}\\right)}{\\sum_{i=1}^N\\left(x_i^2 - 2x_i\\bar{x} + \\bar{x}^2\\right)} \\\n&= \\frac{\\sum_{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\sum_{i=1}(x_i - \\bar{x})^2} \n= \\frac{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})^2},\n\\end{align}\nwhich is just the MLE for the covariance of $X$ and $Y$ over the variance of $X$. This can also be written as\n\\begin{equation}\n\\hat{w}1 = \\frac{\\frac{1}{N}\\sum{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})^2}\n= \\frac{\\sum_{i=1}^N x_iy_i - \\frac{1}{N}\\left(\\sum_{i=1}^Nx_i\\right)\\left(\\sum_{i=1}^Ny_i\\right)}{\\sum_{i=1}^N x_i^2 - \\frac{1}{N}\\left(\\sum_{i=1}^Nx_i\\right)^2}.\n\\end{equation}\nFinally, solving for $\\sigma^2$, we have that\n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_1} = -\\frac{N}{2\\sigma^2} + \\frac{1}{2(\\sigma^2)^2}\\sum_{i=1}^N\\left(y_i - \\left(w_0 +w_1x_i\\right)\\right)^2.\n\\end{align}\nSetting this equal to $0$, substituting for $w_0$ and $w_1$, we have that\n\\begin{align}\n\\hat{\\sigma}^2 &= \\frac{1}{N}\\sum_{i=1}^N\\left(y_i - \\left(\\hat{w}0 +\\hat{w}_1x_i\\right)\\right)^2 \n= \\frac{1}{N}\\sum{i=1}^N\\left(y_i^2 - 2y_i\\left(\\hat{w}0 +\\hat{w}_1x_i\\right) + \\left(\\hat{w}_0 +\\hat{w}_1x_i\\right)^2\\right) \\\n&= \\hat{w}_0^2 + \\frac{1}{N}\\left(\\sum{i=1}^Ny_i^2 - 2\\hat{w}0\\sum{i=1}^Ny_i - 2\\hat{w}1\\sum{i=1}^N x_iy_i + 2\\hat{w}0\\hat{w}_1\\sum{i=1}^N x_i + \\hat{w}1^2\\sum{i=1}^N x_i^2\\right).\n\\end{align}\nThus, our sufficient statistics are\n\\begin{equation}\n\\left(N, \\sum_{i=1}^N x_i, \\sum_{i=1}^N y_i,\\sum_{i=1}^N x_i^2, \\sum_{i=1}^N y_i^2, \\sum_{i=1}^N x_iy_i\\right).\n\\end{equation}\nStep2: Now, let's verify that the online version comes to the same numbers."},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom linreg import *\nnp.random.seed(2016)\ndef make_data(N):\n X = np.linspace(0, 20, N)\n Y = stats.norm.rvs(size=N, loc=-1.5*X + X*X/9, scale=2)\n return X, Y\nX, Y = make_data(21)\nprint(np.column_stack((X,Y)))\nplot_xy(X, Y)\nplt.show()\nExplanation: Sufficient statistics for online linear regression\nFirst, I need to recreate the data generating function from here in Python. See the code for plot_xy, plot_abline, and SimpleOnlineLinearRegressor on GitHub.\nEnd of explanation\nlinReg = SimpleOnlineLinearRegressor()\nlinReg.fit(X, Y)\n## visualize model\nplot_xy(X, Y)\nplot_abline(linReg.get_params()['slope'], linReg.get_params()['intercept'], \n np.min(X) - 1, np.max(X) + 1, \n ax=plt.gca())\nplt.title(\"Training data with best fit line\")\nplt.show()\nprint(linReg.get_params())\nExplanation: Data is not really linear, but let's just do what the exercise tells us to do. Thus, our model is \n\\begin{equation}\ny_i \\sim \\mathcal{N}(w_0 + w_1x_i, \\sigma^2),\n\\end{equation}\nor written in vector notation,\n\\begin{equation}\n\\mathbf{y} \\sim \\mathcal{N}\\left(w_0\\mathbf{1} + w_1\\mathbf{x}, \\sigma^2I\\right).\n\\end{equation}\nThus, we have that\n\\begin{align}\np(\\mathbf{y} \\mid w_0,w_1,\\sigma^2,\\mathbf{x}) &= \\prod_{i=1}^N\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\exp\\left(-\\frac{1}{2\\sigma^2}\\left(y_i - w_0 - w_1x_i\\right)^2\\right) \\\nl(w_0,w_1,\\sigma^2) = \\log p(\\mathbf{y} \\mid w_0,w_1,\\sigma^2,\\mathbf{x}) &= -\\frac{N}{2}\\log(2\\pi) - \\frac{N}{2}\\log(\\sigma^2) - \\frac{1}{2\\sigma^2}\\sum_{i=1}^N\\left(y_i - w_0 - w_1x_i\\right)^2.\n\\end{align}\nLet us try to maximize the log-likelihood. We first solve for $w_0$.\n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_0} = \\frac{1}{\\sigma^2}\\sum_{i=1}^N \\left(y_i - w_0 - w_1x_i\\right)\n= \\frac{1}{\\sigma^2}\\left(-Nw_0 + \\sum_{i=1}^N y_i - w_1 \\sum_{i=1}^Nx_i\\right).\n\\end{align}\nSetting $\\frac{\\partial{l}}{\\partial w_0} = 0$ and solving for $w_0$, we find that\n\\begin{equation}\n\\hat{w}0 = \\frac{\\sum{i=1}^N y_i}{N} - \\hat{w}1\\frac{\\sum{i=1}^N x_i}{N} = \\bar{y} - \\hat{w}_1\\bar{x}\n\\end{equation}\nNext, we solve for $w_1$. Taking the derivative, we have \n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_1} = \\frac{1}{\\sigma^2}\\sum_{i=1}^N x_i\\left(y_i - w_0 - w_1x_i\\right)\n= \\frac{1}{\\sigma^2}\\sum_{i=1}^N\\left(x_iy_i - w_0x_i - w_1x_i^2\\right).\n\\end{align}\nSetting $frac{\\partial{l}}{\\partial w_1} = 0$ and substituting $\\hat{w}_0$ for $w_0$, we have that\n\\begin{align}\n0 &= \\frac{1}{\\sigma^2}\\sum_{i=1}^N\\left(x_iy_i - (\\bar{y} - \\hat{w}1\\bar{x})x_i - \\hat{w}_1x_i^2\\right) \\\n&= \\sum{i=1}^N\\left(x_iy_i - x_i\\bar{y}\\right) -\\hat{w}1\\sum{i=1}^N\\left(x_i^2 - x_i\\bar{x}\\right).\n\\end{align}\nSince $\\sum_{i=1}^N x_i = N\\bar{x}$, we have that\n\\begin{align}\n0 &= \\sum_{i=1}^N\\left(x_iy_i - \\bar{x}\\bar{y}\\right) -\\hat{w}1\\sum{i=1}^N\\left(x_i^2 - \\bar{x}^2\\right) \\\n\\hat{w}1 &= \\frac{\\sum{i=1}^N\\left(x_iy_i - \\bar{x}\\bar{y}\\right)}{\\sum_{i=1}^N\\left(x_i^2 - \\bar{x}^2\\right)} \n= \\frac{\\sum_{i=1}^N\\left(x_iy_i - x_i\\bar{y} -\\bar{x}y_i + \\bar{x}\\bar{y}\\right)}{\\sum_{i=1}^N\\left(x_i^2 - 2x_i\\bar{x} + \\bar{x}^2\\right)} \\\n&= \\frac{\\sum_{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\sum_{i=1}(x_i - \\bar{x})^2} \n= \\frac{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})^2},\n\\end{align}\nwhich is just the MLE for the covariance of $X$ and $Y$ over the variance of $X$. This can also be written as\n\\begin{equation}\n\\hat{w}1 = \\frac{\\frac{1}{N}\\sum{i=1}(x_i - \\bar{x})(y_i-\\bar{y})}{\\frac{1}{N}\\sum_{i=1}(x_i - \\bar{x})^2}\n= \\frac{\\sum_{i=1}^N x_iy_i - \\frac{1}{N}\\left(\\sum_{i=1}^Nx_i\\right)\\left(\\sum_{i=1}^Ny_i\\right)}{\\sum_{i=1}^N x_i^2 - \\frac{1}{N}\\left(\\sum_{i=1}^Nx_i\\right)^2}.\n\\end{equation}\nFinally, solving for $\\sigma^2$, we have that\n\\begin{align}\n\\frac{\\partial{l}}{\\partial w_1} = -\\frac{N}{2\\sigma^2} + \\frac{1}{2(\\sigma^2)^2}\\sum_{i=1}^N\\left(y_i - \\left(w_0 +w_1x_i\\right)\\right)^2.\n\\end{align}\nSetting this equal to $0$, substituting for $w_0$ and $w_1$, we have that\n\\begin{align}\n\\hat{\\sigma}^2 &= \\frac{1}{N}\\sum_{i=1}^N\\left(y_i - \\left(\\hat{w}0 +\\hat{w}_1x_i\\right)\\right)^2 \n= \\frac{1}{N}\\sum{i=1}^N\\left(y_i^2 - 2y_i\\left(\\hat{w}0 +\\hat{w}_1x_i\\right) + \\left(\\hat{w}_0 +\\hat{w}_1x_i\\right)^2\\right) \\\n&= \\hat{w}_0^2 + \\frac{1}{N}\\left(\\sum{i=1}^Ny_i^2 - 2\\hat{w}0\\sum{i=1}^Ny_i - 2\\hat{w}1\\sum{i=1}^N x_iy_i + 2\\hat{w}0\\hat{w}_1\\sum{i=1}^N x_i + \\hat{w}1^2\\sum{i=1}^N x_i^2\\right).\n\\end{align}\nThus, our sufficient statistics are\n\\begin{equation}\n\\left(N, \\sum_{i=1}^N x_i, \\sum_{i=1}^N y_i,\\sum_{i=1}^N x_i^2, \\sum_{i=1}^N y_i^2, \\sum_{i=1}^N x_iy_i\\right).\n\\end{equation}\nEnd of explanation\nonlineLinReg = SimpleOnlineLinearRegressor()\nw_estimates = pd.DataFrame(index=np.arange(2,22), columns=['w0_est', 'w1_est', 'sigma2'], dtype=np.float64)\nfor i in range(len(Y)):\n onlineLinReg.partial_fit(X[i], Y[i])\n if i >= 1:\n w_estimates.loc[i + 1] = {'w0_est': onlineLinReg.get_params()['intercept'], \n 'w1_est': onlineLinReg.get_params()['slope'],\n 'sigma2': onlineLinReg.get_params()['variance']}\nprint(w_estimates)\nprint(onlineLinReg.get_params())\nplt.figure(figsize=(12,8))\nplt.plot(w_estimates.index, w_estimates['w0_est'], 'o', \n markeredgecolor='black', markerfacecolor='none', markeredgewidth=1,\n label='Intercept estimate')\nplt.plot(w_estimates.index, w_estimates['w1_est'], 'o', \n markeredgecolor='red', markerfacecolor='none', markeredgewidth=1,\n label='Slope estimate')\nplt.grid()\nplt.ylabel('Estimate')\nplt.xlabel('# of data points')\nplt.title('Online Linear Regression Estimates')\nplt.hlines(onlineLinReg.get_params()['intercept'], xmin=np.min(X), xmax=np.max(X) + 5, linestyle='--',\n label='Final intercept estimate')\nplt.hlines(onlineLinReg.get_params()['slope'], xmin=np.min(X), xmax=np.max(X) + 5, linestyle='--', color='red',\n label='Final slope estimate')\nplt.legend(loc='center left', bbox_to_anchor=(1,0.5))\nplt.show()\nExplanation: Now, let's verify that the online version comes to the same numbers.\nEnd of explanation"}}},{"rowIdx":2164,"cells":{"Unnamed: 0":{"kind":"number","value":2164,"string":"2,164"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Stochastic optimization landscape of a minimal MLP\nIn this notebook, we will try to better understand how stochastic gradient works. We fit a very simple non-convex model to data generated from a linear ground truth model.\nWe will also observe how the (stochastic) loss landscape changes when selecting different samples.\nStep1: Data is generated from a simple model\nStep2: We propose a minimal single hidden layer perceptron model with a single hidden unit and no bias. The model has two tunable parameters $w_1$, and $w_2$, such that\nStep4: As in the previous notebook, we define a function to sample from and plot loss landscapes.\nStep5: risks[k, i, j] holds loss value $\\ell(f(w_1^{(i)} , w_2^{(j)}, x_k), y_k)$ for a single data point $(x_k, y_k)$;\nempirical_risk[i, j] corresponds to the empirical risk averaged over the training data points\nStep6: Let's define our train loop and train our model\nStep7: We now plot\nStep8: Observe and comment.\nExercices\nStep9: Utilities to generate the slides figures"},"code_prompt":{"kind":"string","value":"Python Code:\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nfrom torch.nn.functional import mse_loss\nfrom torch.autograd import Variable\nfrom torch.nn.functional import relu\nExplanation: Stochastic optimization landscape of a minimal MLP\nIn this notebook, we will try to better understand how stochastic gradient works. We fit a very simple non-convex model to data generated from a linear ground truth model.\nWe will also observe how the (stochastic) loss landscape changes when selecting different samples.\nEnd of explanation\ndef sample_from_ground_truth(n_samples=100, std=0.1):\n x = torch.FloatTensor(n_samples, 1).uniform_(-1, 1)\n epsilon = torch.FloatTensor(n_samples, 1).normal_(0, std)\n y = 2 * x + epsilon\n return x, y\nn_samples = 100\nstd = 3\nx, y = sample_from_ground_truth(n_samples=100, std=std)\nExplanation: Data is generated from a simple model:\n$$y= 2x + \\epsilon$$\nwhere:\n$\\epsilon \\sim \\mathcal{N}(0, 3)$\n$x \\sim \\mathcal{U}(-1, 1)$\nEnd of explanation\nclass SimpleMLP(nn.Module):\n def __init__(self, w=None):\n super(SimpleMLP, self).__init__()\n self.w1 = Parameter(torch.FloatTensor((1,)))\n self.w2 = Parameter(torch.FloatTensor((1,)))\n if w is None:\n self.reset_parameters()\n else:\n self.set_parameters(w)\n def reset_parameters(self):\n self.w1.uniform_(-.1, .1)\n self.w2.uniform_(-.1, .1)\n def set_parameters(self, w):\n with torch.no_grad():\n self.w1[0] = w[0]\n self.w2[0] = w[1]\n def forward(self, x):\n return self.w1 * relu(self.w2 * x) \nExplanation: We propose a minimal single hidden layer perceptron model with a single hidden unit and no bias. The model has two tunable parameters $w_1$, and $w_2$, such that:\n$$f(x) = w_1 \\cdot \\sigma(w_2 \\cdot x)$$\nwhere $\\sigma$ is the ReLU function.\nEnd of explanation\nfrom math import fabs\ndef make_grids(x, y, model_constructor, expected_risk_func, grid_size=100):\n n_samples = len(x)\n assert len(x) == len(y)\n # Grid logic\n x_max, y_max, x_min, y_min = 5, 5, -5, -5\n w1 = np.linspace(x_min, x_max, grid_size, dtype=np.float32)\n w2 = np.linspace(y_min, y_max, grid_size, dtype=np.float32)\n W1, W2 = np.meshgrid(w1, w2)\n W = np.concatenate((W1[:, :, None], W2[:, :, None]), axis=2)\n W = torch.from_numpy(W)\n # We will store the results in this tensor\n risks = torch.FloatTensor(n_samples, grid_size, grid_size)\n expected_risk = torch.FloatTensor(grid_size, grid_size)\n with torch.no_grad():\n for i in range(grid_size):\n for j in range(grid_size):\n model = model_constructor(W[i, j])\n pred = model(x)\n loss = mse_loss(pred, y, reduction=\"none\")\n risks[:, i, j] = loss.view(-1)\n expected_risk[i, j] = expected_risk_func(W[i, j, 0], W[i, j, 1])\n empirical_risk = torch.mean(risks, dim=0)\n \n return W1, W2, risks.numpy(), empirical_risk.numpy(), expected_risk.numpy()\ndef expected_risk_simple_mlp(w1, w2):\n Question: Can you derive this your-self?\n return .5 * (8 / 3 - (4 / 3) * w1 * w2 + 1 / 3 * w1 ** 2 * w2 ** 2) + std ** 2\nExplanation: As in the previous notebook, we define a function to sample from and plot loss landscapes.\nEnd of explanation\nW1, W2, risks, empirical_risk, expected_risk = make_grids(\n x, y, SimpleMLP, expected_risk_func=expected_risk_simple_mlp)\nExplanation: risks[k, i, j] holds loss value $\\ell(f(w_1^{(i)} , w_2^{(j)}, x_k), y_k)$ for a single data point $(x_k, y_k)$;\nempirical_risk[i, j] corresponds to the empirical risk averaged over the training data points:\n$$ \\frac{1}{n} \\sum_{k=1}^{n} \\ell(f(w_1^{(i)}, w_2^{(j)}, x_k), y_k)$$\nEnd of explanation\nfrom torch.optim import SGD\ndef train(model, x, y, lr=.1, n_epochs=1):\n optimizer = SGD(model.parameters(), lr=lr)\n iterate_rec = []\n grad_rec = []\n for epoch in range(n_epochs):\n # Iterate over the dataset one sample at a time:\n # batch_size=1\n for this_x, this_y in zip(x, y):\n this_x = this_x[None, :]\n this_y = this_y[None, :]\n optimizer.zero_grad()\n pred = model(this_x)\n loss = mse_loss(pred, this_y)\n loss.backward()\n with torch.no_grad():\n iterate_rec.append(\n [model.w1.clone()[0], model.w2.clone()[0]]\n )\n grad_rec.append(\n [model.w1.grad.clone()[0], model.w2.grad.clone()[0]]\n )\n optimizer.step()\n return np.array(iterate_rec), np.array(grad_rec)\ninit = torch.FloatTensor([3, -4])\nmodel = SimpleMLP(init)\niterate_rec, grad_rec = train(model, x, y, lr=.01)\nprint(iterate_rec[-1])\nExplanation: Let's define our train loop and train our model:\nEnd of explanation\nimport matplotlib.colors as colors\nclass LevelsNormalize(colors.Normalize):\n def __init__(self, levels, clip=False):\n self.levels = levels\n vmin, vmax = levels[0], levels[-1]\n colors.Normalize.__init__(self, vmin, vmax, clip)\n def __call__(self, value, clip=None):\n quantiles = np.linspace(0, 1, len(self.levels))\n return np.ma.masked_array(np.interp(value, self.levels, quantiles))\ndef plot_map(W1, W2, risks, emp_risk, exp_risk, sample, iter_):\n all_risks = np.concatenate((emp_risk.ravel(), exp_risk.ravel()))\n x_center, y_center = emp_risk.shape[0] // 2, emp_risk.shape[1] // 2\n risk_at_center = exp_risk[x_center, y_center]\n low_levels = np.percentile(all_risks[all_risks <= risk_at_center],\n q=np.linspace(0, 100, 11))\n high_levels = np.percentile(all_risks[all_risks > risk_at_center],\n q=np.linspace(10, 100, 10))\n levels = np.concatenate((low_levels, high_levels))\n norm = LevelsNormalize(levels=levels)\n cmap = plt.get_cmap('RdBu_r')\n fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(12, 4))\n risk_levels = levels.copy()\n risk_levels[0] = min(risks[sample].min(), risk_levels[0])\n risk_levels[-1] = max(risks[sample].max(), risk_levels[-1])\n ax1.contourf(W1, W2, risks[sample], levels=risk_levels,\n norm=norm, cmap=cmap)\n ax1.scatter(iterate_rec[iter_, 0], iterate_rec[iter_, 1],\n color='orange')\n if any(grad_rec[iter_] != 0):\n ax1.arrow(iterate_rec[iter_, 0], iterate_rec[iter_, 1],\n -0.1 * grad_rec[iter_, 0], -0.1 * grad_rec[iter_, 1],\n head_width=0.3, head_length=0.5, fc='orange', ec='orange')\n ax1.set_title('Pointwise risk')\n ax2.contourf(W1, W2, emp_risk, levels=levels, norm=norm, cmap=cmap)\n ax2.plot(iterate_rec[:iter_ + 1, 0], iterate_rec[:iter_ + 1, 1],\n linestyle='-', marker='o', markersize=6,\n color='orange', linewidth=2, label='SGD trajectory')\n ax2.legend()\n ax2.set_title('Empirical risk')\n cf = ax3.contourf(W1, W2, exp_risk, levels=levels, norm=norm, cmap=cmap)\n ax3.scatter(iterate_rec[iter_, 0], iterate_rec[iter_, 1],\n color='orange', label='Current sample')\n ax3.set_title('Expected risk (ground truth)')\n plt.colorbar(cf, ax=ax3)\n ax3.legend()\n fig.suptitle('Iter %i, sample % i' % (iter_, sample))\n plt.show()\nfor sample in range(0, 100, 10):\n plot_map(W1, W2, risks, empirical_risk, expected_risk, sample, sample)\nExplanation: We now plot:\n- the point-wise risk at iteration $k$ on the left plot\n- the total empirical risk on the center plot\n- the expected risk on the right plot\nObserve how empirical and expected risk differ, and how empirical risk minimization is not totally equivalent to expected risk minimization.\nEnd of explanation\n# %load solutions/linear_mlp.py\nExplanation: Observe and comment.\nExercices:\nChange the model to a completely linear one and reproduce the plots. What change do you observe regarding the plot of the stochastic loss landscape?\nTry to initialize the model with pathological weights, e.g., symmetric ones. What do you observe?\nYou may increase the number of epochs to observe slow convergence phenomena\nTry augmenting the noise in the dataset. What do you observe?\nEnd of explanation\n# from matplotlib.animation import FuncAnimation\n# from IPython.display import HTML\n# fig, ax = plt.subplots(figsize=(8, 8))\n# all_risks = np.concatenate((empirical_risk.ravel(),\n# expected_risk.ravel()))\n# x_center, y_center = empirical_risk.shape[0] // 2, empirical_risk.shape[1] // 2\n# risk_at_center = expected_risk[x_center, y_center]\n# low_levels = np.percentile(all_risks[all_risks <= risk_at_center],\n# q=np.linspace(0, 100, 11))\n# high_levels = np.percentile(all_risks[all_risks > risk_at_center],\n# q=np.linspace(10, 100, 10))\n# levels = np.concatenate((low_levels, high_levels))\n# norm = LevelsNormalize(levels=levels)\n# cmap = plt.get_cmap('RdBu_r')\n# ax.set_title('Pointwise risk')\n# def animate(i):\n# for c in ax.collections:\n# c.remove()\n# for l in ax.lines:\n# l.remove()\n# for p in ax.patches:\n# p.remove()\n# risk_levels = levels.copy()\n# risk_levels[0] = min(risks[i].min(), risk_levels[0])\n# risk_levels[-1] = max(risks[i].max(), risk_levels[-1])\n# ax.contourf(W1, W2, risks[i], levels=risk_levels,\n# norm=norm, cmap=cmap)\n# ax.plot(iterate_rec[:i + 1, 0], iterate_rec[:i + 1, 1],\n# linestyle='-', marker='o', markersize=6,\n# color='orange', linewidth=2, label='SGD trajectory')\n# return []\n# anim = FuncAnimation(fig, animate,# init_func=init,\n# frames=100, interval=300, blit=True)\n# anim.save(\"stochastic_landscape_minimal_mlp.mp4\")\n# plt.close(fig)\n# HTML(anim.to_html5_video())\n# fig, ax = plt.subplots(figsize=(8, 7))\n# cf = ax.contourf(W1, W2, empirical_risk, levels=levels, norm=norm, cmap=cmap)\n# ax.plot(iterate_rec[:100 + 1, 0], iterate_rec[:100 + 1, 1],\n# linestyle='-', marker='o', markersize=6,\n# color='orange', linewidth=2, label='SGD trajectory')\n# ax.legend()\n# plt.colorbar(cf, ax=ax)\n# ax.set_title('Empirical risk')\n# fig.savefig('empirical_loss_landscape_minimal_mlp.png')\nExplanation: Utilities to generate the slides figures\nEnd of explanation"}}},{"rowIdx":2165,"cells":{"Unnamed: 0":{"kind":"number","value":2165,"string":"2,165"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Python II\nWiederholung\nStep1: 2 Viel mächtigere Funktion\nStep2: 3 Aber wie sind Funktion, Modules und Libraries aufgebaut?\nStep3: 4 Bauen wir die eigenen Funktion\nBauen wir ganze Sätze, aus Listen von Strings\nStep4: Und zum aufrufen packe ich meine List in Klammen ()\nStep5: Bauen wir eine simple Suche\nStep6: 5 Struktur und Troubleshooting\nZuerst die Imports\nDann die eigenen Funktionen\nNun der eigentliche Code"},"code_prompt":{"kind":"string","value":"Python Code:\nlst = [11,2,34, 4,5,5111]\nlen(lst)\nlen([11,2,'sort',4,5,5111])\nsorted(lst)\nlst\nlst.sort()\nlst\nmin(lst)\nmax(lst)\nstr(1212)\nsum([1,2,2])\nlst\nlst.remove(4)\nlst.append(4)\nstring = 'hello, wie geht, es Dir?'\nstring.split(',')\nExplanation: Python II\nWiederholung: die wichtigsten Funktion\nViel mächtigere Funktion: Modules und Libraries\nSchauen wir uns diese simplen Funktionen genauer an\nBauen wir die eigenen Funktionen\nStruktur und Troubleshooting\n1 Wichtigste Funktionen\nEine Übersicht der 64 wichtigsten simplen Python-Funktionen sind hier gelistet.\nEnd of explanation\nimport urllib \nimport requests\nimport glob\nimport pandas\nfrom bs4 import BeautifulSoup\nimport re\n#etc. etc.\ndef sort(string):\n elem = input('Bitte geben Sie den Suchbegriff ein: ')\n if elem in string:\n return 'Treffer'\n else:\n return 'Kein Treffer'\nstring_test = \"«Guten Tag, ich bin der, der Sie vor einer Stunde geweckt hat», sagte der Moderator des Podiums in Stockholm, als er am Montagmittag den US-Wissenschaftler Richard H. Thaler anrief. Für seine Erforschung der Psychologie hinter wirtschaftlichen Entscheidungen bekommt dieser den Nobelpreis für Wirtschaft. Das gab die Königlich-Schwedische Wissenschaftsakademie bekannt. Der 72-Jährige lehrt an der Universität Chicago. Der Verhaltensökonom habe gezeigt, dass begrenzte Rationalität, soziale.\"\nstring_test\ndef suche(elem, string):\n #elem = input('Bitte geben Sie den Suchbegriff ein: ')\n if elem in string:\n return 'Treffer'\n else:\n return 'Kein Treffer'\n \nsuche(strings[1], string_test)\nstrings = ['Stockholm', 'blödes Wort', 'Rationalität', 'soziale']\nfor st in strings:\n ergebnis = suche(st, string_test)\n print(st, ergebnis)\nsuche(string_test)\nsuche(string_test)\nlst = [1,3,5]\nlen(lst)\nExplanation: 2 Viel mächtigere Funktion: Modules und Libraries\nModules & Libraries\nEnd of explanation\nimport os\n#Funktioniert leider nicht mit allen Built in Functionen\nos.path.split??\n#Beispiel Sort\ndef sort(list):\n for index in range(1,len(list)):\n value = list[index]\n i = index-1\n while i>=0:\n if value < list[i]:\n list[i+1] = list[i]\n list[i] = value\n i -= 1\n else:\n break\n return list\n#Ganz komplexe. Wenn Du nicht mit dem Modul urllib, bzw. urlretrieve \n#arbeiten könntest, müsstest Du jetzt all das eintippen.\ndef urlretrieve(url, filename=None, reporthook=None, data=None):\n url_type, path = splittype(url)\n with contextlib.closing(urlopen(url, data)) as fp:\n headers = fp.info()\n # Just return the local path and the \"headers\" for file://\n # URLs. No sense in performing a copy unless requested.\n if url_type == \"file\" and not filename:\n return os.path.normpath(path), headers\n # Handle temporary file setup.\n if filename:\n tfp = open(filename, 'wb')\n else:\n tfp = tempfile.NamedTemporaryFile(delete=False)\n filename = tfp.name\n _url_tempfiles.append(filename)\n with tfp:\n result = filename, headers\n bs = 1024*8\n size = -1\n read = 0\n blocknum = 0\n if \"content-length\" in headers:\n size = int(headers[\"Content-Length\"])\n if reporthook:\n reporthook(blocknum, bs, size)\n while True:\n block = fp.read(bs)\n if not block:\n break\n read += len(block)\n tfp.write(block)\n blocknum += 1\n if reporthook:\n reporthook(blocknum, bs, size)\n if size >= 0 and read < size:\n raise ContentTooShortError(\n \"retrieval incomplete: got only %i out of %i bytes\"\n % (read, size), result)\n return result\nimport urllib.request\nwith urllib.request.urlopen('http://tagesanzeiger.ch/') as response:\n html = response.read()\nhtml\nExplanation: 3 Aber wie sind Funktion, Modules und Libraries aufgebaut?\nEnd of explanation\nlst = ['ich', 'habe', None, 'ganz', 'kalt']\ndef join(mylist):\n long_str = ''\n for elem in mylist:\n try:\n long_str = long_str + elem + \" \"\n except:\n None \n return long_str.strip()\njoin(lst)\nExplanation: 4 Bauen wir die eigenen Funktion\nBauen wir ganze Sätze, aus Listen von Strings\nEnd of explanation\njoin(lst)\nstring = ' ich habe ganz kalt '\nstring.strip()\nExplanation: Und zum aufrufen packe ich meine List in Klammen ()\nEnd of explanation\nsatz = \"Die Unabhängigkeit der Notenbanken von der Politik gilt bisher als anerkannter Grundpfeiler der modernen Wirtschafts- und Geldpolitik in fortgeschrittenen Volkswirtschaften. Zu gross wäre sonst das Risiko, dass gewählte Politiker die Notenpresse anwerfen, wenn es ihren persönlichen Zielen gerade gelegen kommt, und dass dadurch die Stabilität des Geldes und das Vertrauen in das Zahlungsmittel untergraben wird.\"\nsort(satz)\ndef find(string):\n elem = input('Bitte geben Sie den Suchbegriff ein: ')\n if elem in string:\n return 'Treffer'\n else:\n return 'Kein Treffer'\nfind(satz)\nExplanation: Bauen wir eine simple Suche\nEnd of explanation\nprint('Immer im Code verwenden, um zu wissen wo der Fehler nun ganz genau passiert.')\n#Beispiel Sort\ndef sort(list):\n for index in range(1,len(list)):\n value = list[index]\n print(value)\n i = index-1\n print(i)\n while i>=0:\n if value < list[i]:\n list[i+1] = list[i]\n list[i] = value\n i -= 1\n else:\n break\n return list\nsort(lst)\nlst\nExplanation: 5 Struktur und Troubleshooting\nZuerst die Imports\nDann die eigenen Funktionen\nNun der eigentliche Code\nEnd of explanation"}}},{"rowIdx":2166,"cells":{"Unnamed: 0":{"kind":"number","value":2166,"string":"2,166"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Class 27 - Boolean Networks\nStep1: Define a function hamming.dist that gives the hamming distance between two states of the Boolean network (as numpy arrays of ones and zeroes)\nStep2: Define a function evolve that takes the network from one Boolean vector state to another Boolean vector state\nStep3: Write a function that runs 10,000 simulations of the network. In each simulation, the procedure is"},"code_prompt":{"kind":"string","value":"Python Code:\nimport numpy\nnodes = ['Cell Size', \n 'Cln3', \n 'MBF', \n 'Clb5,6', \n 'Mcm1/SFF', \n 'Swi5', \n 'Sic1', \n 'Clb1,2', \n 'Cdc20&Cdc14', \n 'Cdh1', \n 'Cln1,2', \n 'SBF']\nN = len(nodes)\n# define the transition matrix \na = numpy.zeros([N, N])\na[0,1] = 1\na[1,1] = -1\na[1,2] = 1\na[1,11] = 1\na[2,3] = 1\na[3,4] = 1\na[3,6] = -1\na[3,7] = 1\na[3,9] = -1\na[4,4] = -1\na[4,5] = 1\na[4,7] = 1\na[4,8] = 1\na[5,5] = -1\na[5,6] = 1\na[6,3] = -1\na[6,7] = -1\na[7,2] = -1\na[7,4] = 1\na[7,5] = -1\na[7,6] = -1\na[7,8] = 1\na[7,9] = -1\na[7,11] = -1\na[8,3] = -1\na[8,5] = 1\na[8,6] = 1\na[8,7] = -1\na[8,8] = -1\na[8,9] = 1\na[9,7] = -1\na[10,6] = -1\na[10,9] = -1\na[10,10] = -1\na[11,10] = 1\na = numpy.matrix(a)\n# define the matrix of states for the fixed points\nnum_fp = 7\nfixed_points = numpy.zeros([num_fp, N])\nfixed_points[0, 6] = 1\nfixed_points[0, 9] = 1\nfixed_points[1, 10] = 1\nfixed_points[1, 11] = 1\nfixed_points[2, 2] = 1\nfixed_points[2, 6] = 1\nfixed_points[2, 9] = 1\nfixed_points[3, 6] = 1\nfixed_points[4, 2] = 1\nfixed_points[4, 6] = 1\nfixed_points[6, 9] = 1\nfixed_points = numpy.matrix(fixed_points)\nbasin_counts = numpy.zeros(num_fp)\nExplanation: Class 27 - Boolean Networks\nEnd of explanation\ndef hamming_dist(x1, x2):\n return np.sum(np.abs(x1-x2))\nExplanation: Define a function hamming.dist that gives the hamming distance between two states of the Boolean network (as numpy arrays of ones and zeroes)\nEnd of explanation\ndef evolve(state):\n result = numpy.array(a.transpose().dot(state))\n result = numpy.reshape(result, N)\n result[result > 0] = 1\n result[result == 0] = state[result == 0]\n result[result < 0] = 0\n return result\nExplanation: Define a function evolve that takes the network from one Boolean vector state to another Boolean vector state\nEnd of explanation\nimport itertools\nimport random\nbasin_ids = []\nfor _ in itertools.repeat(None, 10000):\n state = [0]\n for pos in range(0, (N-1)):\n state.append(random.randint(0,1))\n state = numpy.array(state)\n state_new = numpy.array([-1]*N)\n while(True):\n state_new = evolve(state)\n if hamming_dist(state, state_new) == 0:\n break\n state = state_new\n for j in range(0, num_fp):\n fp_state = numpy.array(fixed_points[j,])\n fp_state = numpy.reshape(fp_state, N)\n if hamming_dist(state, fp_state) == 0:\n basin_ids.append(j)\nnumpy.bincount(basin_ids)\nExplanation: Write a function that runs 10,000 simulations of the network. In each simulation, the procedure is:\n- create a random binary vector of length 12, and call that vector state (make sure the zeroth element is set to zero)\n- iteratively call \"evolve\", passing the state to evolve and then updating state with the return value from evolve\n- check if state changes in the last call to evolve; if it does not, then you have reached a fixed point; stop iterating\n- compare the state to the rows of fixed_points; for the unique row i for which you find a match, increment the element in position i of basin_counts\n- print out basin_counts\nEnd of explanation"}}},{"rowIdx":2167,"cells":{"Unnamed: 0":{"kind":"number","value":2167,"string":"2,167"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Dark Energy Spectroscopic Instrument\nSome calculations to assist with building the DESI model from an existing ZEMAX model and other sources.\nYou can safely ignore this if you just want to use the model.\nStep1: Load the DESI model\nStep2: Corrector Internal Baffles\nSetup YAML to preserve dictionary order and trunctate distances (in meters) to 5 digits\nStep3: Define the corrector internal baffle apertures, from DESI-4103-v1. These have been checked against DESI-4037-v6, with the extra baffle between ADC1 and ADC2 added\nStep4: Calculate batoid Baffle surfaces for the corrector. These are mechanically planar, but that would put their (planar) center inside a lens, breaking the sequential tracing model. We fix this by use spherical baffle surfaces that have the same apertures. This code was originally used to read a batoid model without baffles, but also works if the baffles are already added.\nStep5: Validate that the baffle edges in the final model have the correct apertures\nStep6: Corrector Cage and Spider\nCalculate simplified vane coordinates using parameters from DESI-4110-v1\nStep7: Plot \"User Aperture Data\" from the ZEMAX \"spider\" surface 6, as cross check"},"code_prompt":{"kind":"string","value":"Python Code:\nimport batoid\nimport numpy as np\nimport yaml\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n%matplotlib inline\nExplanation: Dark Energy Spectroscopic Instrument\nSome calculations to assist with building the DESI model from an existing ZEMAX model and other sources.\nYou can safely ignore this if you just want to use the model.\nEnd of explanation\nfiducial_telescope = batoid.Optic.fromYaml(\"DESI.yaml\")\nExplanation: Load the DESI model:\nEnd of explanation\nimport collections\ndef dict_representer(dumper, data):\n return dumper.represent_dict(data.items())\nyaml.Dumper.add_representer(collections.OrderedDict, dict_representer)\ndef float_representer(dumper, value):\n return dumper.represent_scalar(u'tag:yaml.org,2002:float', f'{value:.5f}')\nyaml.Dumper.add_representer(float, float_representer)\nExplanation: Corrector Internal Baffles\nSetup YAML to preserve dictionary order and trunctate distances (in meters) to 5 digits:\nEnd of explanation\n# baffle z-coordinates relative to FP in mm from DESI-4103-v1, checked\n# against DESI-4037-v6 (and with extra ADC baffle added).\nZBAFFLE = np.array([\n 2302.91, 2230.29, 1916.86, 1823.57, 1617.37, 1586.76, 1457.88, 1349.45, 1314.68,\n 1232.06, 899.67, 862.08, 568.81, 483.84, 415.22]) \n# baffle radii in mm from DESI-4103-v1, checked\n# against DESI-4037-v6 (and with extra ADC baffle added).\nRBAFFLE = np.array([\n 558.80, 544.00, 447.75, 417.00, 376.00, 376.00, 378.00, 378.00, 395.00,\n 403.00, 448.80, 453.70, 492.00, 501.00, 496.00])\nExplanation: Define the corrector internal baffle apertures, from DESI-4103-v1. These have been checked against DESI-4037-v6, with the extra baffle between ADC1 and ADC2 added:\nEnd of explanation\ndef baffles(nindent=10):\n indent = ' ' * nindent\n # Measure z from C1 front face in m.\n zbaffle = 1e-3 * (2425.007 - ZBAFFLE)\n # Convert r from mm to m.\n rbaffle = 1e-3 * RBAFFLE\n # By default, all baffles are planar.\n nbaffles = len(zbaffle)\n baffles = []\n for i in range(nbaffles):\n baffle = collections.OrderedDict()\n baffle['type'] = 'Baffle'\n baffle['name'] = f'B{i+1}'\n baffle['coordSys'] = {'z': float(zbaffle[i])}\n baffle['surface'] = {'type': 'Plane'}\n baffle['obscuration'] = {'type': 'ClearCircle', 'radius': float(rbaffle[i])}\n baffles.append(baffle)\n # Loop over corrector lenses.\n corrector = fiducial_telescope['DESI.Hexapod.Corrector']\n lenses = 'C1', 'C2', 'ADC1rotator.ADC1', 'ADC2rotator.ADC2', 'C3', 'C4'\n for lens in lenses:\n obj = corrector['Corrector.' + lens]\n assert isinstance(obj, batoid.optic.Lens)\n front, back = obj.items[0], obj.items[1]\n fTransform = batoid.CoordTransform(front.coordSys, corrector.coordSys)\n bTransform = batoid.CoordTransform(back.coordSys, corrector.coordSys)\n _, _, zfront = fTransform.applyForwardArray(0, 0, 0)\n _, _, zback = bTransform.applyForwardArray(0, 0, 0)\n # Find any baffles \"inside\" this lens.\n inside = (zbaffle >= zfront) & (zbaffle <= zback)\n if not any(inside):\n continue\n inside = np.where(inside)[0]\n for k in inside:\n baffle = baffles[k]\n r = rbaffle[k]\n # Calculate sag at (x,y)=(0,r) to avoid effect of ADC rotation about y.\n sagf, sagb = front.surface.sag(0, r), back.surface.sag(0, r)\n _, _, zf = fTransform.applyForwardArray(0, r, sagf)\n _, _, zb = bTransform.applyForwardArray(0, r, sagb)\n if zf > zbaffle[k]:\n print(f'{indent}# Move B{k+1} in front of {obj.name} and make spherical to keep model sequential.')\n assert isinstance(front.surface, batoid.Sphere)\n baffle['surface'] = {'type': 'Sphere', 'R': front.surface.R}\n baffle['coordSys']['z'] = float(zfront - (zf - zbaffle[k]))\n elif zbaffle[k] > zb:\n print(f'{indent}# Move B{k+1} behind {obj.name} and make spherical to keep model sequential.')\n assert isinstance(back.surface, batoid.Sphere)\n baffle['surface'] = {'type': 'Sphere', 'R': back.surface.R}\n baffle['coordSys']['z'] = float(zback + (zbaffle[k] - zb))\n else:\n print(f'Cannot find a solution for B{k+1} inside {obj.name}!')\n lines = yaml.dump(baffles)\n for line in lines.split('\\n'):\n print(indent + line)\nbaffles()\nExplanation: Calculate batoid Baffle surfaces for the corrector. These are mechanically planar, but that would put their (planar) center inside a lens, breaking the sequential tracing model. We fix this by use spherical baffle surfaces that have the same apertures. This code was originally used to read a batoid model without baffles, but also works if the baffles are already added.\nEnd of explanation\ndef validate_baffles():\n corrector = fiducial_telescope['DESI.Hexapod.Corrector']\n for i in range(len(ZBAFFLE)):\n baffle = corrector[f'Corrector.B{i+1}']\n # Calculate surface z at origin in corrector coordinate system.\n _, _, z = batoid.CoordTransform(baffle.coordSys, corrector.coordSys).applyForwardArray(0, 0, 0)\n # Calculate surface z at (r,0) in corrector coordinate system.\n sag = baffle.surface.sag(1e-3 * RBAFFLE[i], 0)\n z += sag\n # Measure from FP in mm.\n z = np.round(2425.007 - 1e3 * z, 2)\n assert z == ZBAFFLE[i], baffle.name\n \nvalidate_baffles()\nExplanation: Validate that the baffle edges in the final model have the correct apertures:\nEnd of explanation\ndef spider(dmin=1762, dmax=4940.3, ns_angle=77, widths=[28.5, 28.5, 60., 19.1],\n wart_r=958, wart_dth=6, wart_w=300):\n # Vane order is [NE, SE, SW, NW], with N along -y and E along +x.\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.add_artist(plt.Circle((0, 0), 0.5 * dmax, color='yellow'))\n ax.add_artist(plt.Circle((0, 0), 0.5 * dmin, color='gray'))\n ax.set_xlim(-0.5 * dmax, 0.5 * dmax)\n ax.set_ylim(-0.5 * dmax, 0.5 * dmax)\n \n # Place outer vertices equally along the outer ring at NE, SE, SW, NW.\n xymax = 0.5 * dmax * np.array([[1, -1], [1, 1], [-1, 1], [-1, -1]]) / np.sqrt(2)\n # Calculate inner vertices so that the planes of the NE and NW vanes intersect\n # with an angle of ns_angle (same for the SE and SW planes).\n angle = np.deg2rad(ns_angle)\n x = xymax[1, 0]\n dx = xymax[1, 1] * np.tan(0.5 * angle)\n xymin = np.array([[x - dx, 0], [x - dx, 0], [-x+dx, 0], [-x+dx, 0]])\n for i in range(4):\n plt.plot([xymin[i,0], xymax[i,0]], [xymin[i,1], xymax[i,1]], '-', lw=0.1 * widths[i])\n # Calculate batoid rectangle params for the vanes.\n xy0 = 0.5 * (xymin + xymax)\n heights = np.sqrt(np.sum((xymax - xymin) ** 2, axis=1))\n # Calculate wart rectangle coords.\n wart_h = 2 * (wart_r - 0.5 * dmin)\n wart_dth = np.deg2rad(wart_dth)\n wart_xy = 0.5 * dmin * np.array([-np.sin(wart_dth), np.cos(wart_dth)])\n plt.plot(*wart_xy, 'rx', ms=25)\n # Print batoid config.\n indent = ' ' * 10\n print(f'{indent}-\\n{indent} type: ClearAnnulus')\n print(f'{indent} inner: {np.round(0.5e-3 * dmin, 5)}')\n print(f'{indent} outer: {np.round(0.5e-3 * dmax, 5)}')\n for i in range(4):\n print(f'{indent}-\\n{indent} type: ObscRectangle')\n print(f'{indent} x: {np.round(1e-3 * xy0[i, 0], 5)}')\n print(f'{indent} y: {np.round(1e-3 * xy0[i, 1], 5)}')\n print(f'{indent} width: {np.round(1e-3 * widths[i], 5)}')\n print(f'{indent} height: {np.round(1e-3 * heights[i], 5)}')\n dx, dy = xymax[i] - xymin[i]\n angle = np.arctan2(-dx, dy)\n print(f'{indent} theta: {np.round(angle, 5)}')\n print(f'-\\n type: ObscRectangle')\n print(f' x: {np.round(1e-3 * wart_xy[0], 5)}')\n print(f' y: {np.round(1e-3 * wart_xy[1], 5)}')\n print(f' width: {np.round(1e-3 * wart_w, 5)}')\n print(f' height: {np.round(1e-3 * wart_h, 5)}')\n print(f' theta: {np.round(wart_dth, 5)}')\n \nspider()\nExplanation: Corrector Cage and Spider\nCalculate simplified vane coordinates using parameters from DESI-4110-v1:\nEnd of explanation\ndef plot_obs():\n wart1 = np.array([\n [ -233.22959, 783.94254],\n [-249.32698, 937.09892],\n [49.02959, 968.45746],\n [ 65.126976, 815.30108],\n [ -233.22959, 783.94254],\n ])\n wart2 = np.array([\n [-233.22959, 783.94254],\n [ -249.32698, 937.09892],\n [49.029593, 968.45746],\n [65.126976, 815.30108],\n [-233.22959, 783.94254],\n ])\n vane1 = np.array([\n [363.96554,-8.8485008],\n [341.66121, 8.8931664],\n [1713.4345, 1733.4485],\n [1735.7388, 1715.7068],\n [363.96554,-8.8485008],\n ])\n vane2 = np.array([\n [-1748.0649, 1705.9022],\n [ -1701.1084, 1743.2531],\n [ -329.33513, 18.697772],\n [ -376.29162, -18.653106],\n [-1748.0649, 1705.9022],\n ])\n vane3 = np.array([\n [ -1717.1127, -1730.5227],\n [ -1732.0605, -1718.6327],\n [ -360.28728, 5.922682],\n [-345.33947, -5.9673476],\n [ -1717.1127, -1730.5227],\n ])\n vane4 = np.array([\n [ 341.66121, -8.8931664],\n [363.96554, 8.8485008],\n [1735.7388, -1715.7068],\n [1713.4345, -1733.4485],\n [ 341.66121, -8.8931664],\n ])\n extra = np.array([\n [ 2470 , 0 ],\n [ 2422.5396 , -481.8731 ],\n [ 2281.9824 , -945.22808 ],\n [ 2053.7299 , -1372.2585 ],\n [ 1746.5537 , -1746.5537 ],\n [ 1372.2585 , -2053.7299 ],\n [ 945.22808 , -2281.9824 ],\n [ 481.8731 , -2422.5396 ],\n [ 3.0248776e-13 , -2470 ],\n [ -481.8731 , -2422.5396 ],\n [ -945.22808 , -2281.9824 ],\n [ -1372.2585 , -2053.7299 ],\n [ -1746.5537 , -1746.5537 ],\n [ -2053.7299 , -1372.2585 ],\n [ -2281.9824 , -945.22808 ],\n [ -2422.5396 , -481.8731 ],\n [ -2470 , 2.9882133e-12 ],\n [ -2422.5396 , 481.8731 ],\n [ -2281.9824 , 945.22808 ],\n [ -2053.7299 , 1372.2585 ],\n [ -1746.5537 , 1746.5537 ],\n [ -1372.2585 , 2053.7299 ],\n [ -945.22808 , 2281.9824 ],\n [ -481.8731 , 2422.5396 ],\n [ 5.9764266e-12 , 2470 ],\n [ 481.8731 , 2422.5396 ],\n [ 945.22808 , 2281.9824 ],\n [ 1372.2585 , 2053.7299 ],\n [ 1746.5537 , 1746.5537 ],\n [ 2053.7299 , 1372.2585 ],\n [ 2281.9824 , 945.22808 ],\n [ 2422.5396 , 481.8731 ],\n [ 2470 , -1.0364028e-11 ],\n [ 2724 , 0 ],\n [ 2671.6591 , -531.42604 ],\n [ 2516.6478 , -1042.4297 ],\n [ 2264.9232 , -1513.3733 ],\n [ 1926.1589 , -1926.1589 ],\n [ 1513.3733 , -2264.9232 ],\n [ 1042.4297 , -2516.6478 ],\n [ 531.42604 , -2671.6591 ],\n [ 3.3359379e-13 , -2724 ],\n [ -531.42604 , -2671.6591 ],\n [ -1042.4297 , -2516.6478 ],\n [ -1513.3733 , -2264.9232 ],\n [ -1926.1589 , -1926.1589 ],\n [ -2264.9232 , -1513.3733 ],\n [ -2516.6478 , -1042.4297 ],\n [ -2671.6591 , -531.42604 ],\n [ -2724 , 3.2955032e-12 ],\n [ -2671.6591 , 531.42604 ],\n [ -2516.6478 , 1042.4297 ],\n [ -2264.9232 , 1513.3733 ],\n [ -1926.1589 , 1926.1589 ],\n [ -1513.3733 , 2264.9232 ],\n [ -1042.4297 , 2516.6478 ],\n [ -531.42604 , 2671.6591 ],\n [ 6.5910065e-12 , 2724 ],\n [ 531.42604 , 2671.6591 ],\n [ 1042.4297 , 2516.6478 ],\n [ 1513.3733 , 2264.9232 ],\n [ 1926.1589 , 1926.1589 ],\n [ 2264.9232 , 1513.3733 ],\n [ 2516.6478 , 1042.4297 ],\n [ 2671.6591 , 531.42604 ],\n [ 2724 , -1.1429803e-11 ],\n [ 2470 , 0 ],\n ])\n plt.figure(figsize=(20, 20))\n plt.plot(*wart1.T)\n plt.plot(*wart2.T)\n plt.plot(*vane1.T)\n plt.plot(*vane2.T)\n plt.plot(*vane3.T)\n plt.plot(*vane4.T)\n plt.plot(*extra.T)\n w = 1762./2.\n plt.gca().add_artist(plt.Circle((0, 0), w, color='gray'))\n plt.gca().set_aspect(1.)\n \nplot_obs()\nExplanation: Plot \"User Aperture Data\" from the ZEMAX \"spider\" surface 6, as cross check:\nEnd of explanation"}}},{"rowIdx":2168,"cells":{"Unnamed: 0":{"kind":"number","value":2168,"string":"2,168"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n \n
\n
Copyright 2019. Created by Jose Marcial Portilla.
\nCNN on Custom Images\nFor this exercise we're using a collection of Cats and Dogs images inspired by the classic \nRecall that imshow clips pixel values <0, so the resulting display lacks contrast. We'll apply a quick inverse transform to the input tensor so that images show their \"true\" colors.\nStep4: Define the model\nWe'll start by using a model similar to the one we applied to the CIFAR-10 dataset, except that here we have a binary classification (2 output channels, not 10). Also, we'll add another set of convolution/pooling layers.\nStep5:
Why (54x54x16)?
\nWith 224 pixels per side, the kernels and pooling layers result in $\\;(((224-2)/2)-2)/2 = 54.5\\;$ which rounds down to 54 pixels per side.
\nInstantiate the model, define loss and optimization functions\nWe're going to call our model \"CNNmodel\" to differentiate it from an \"AlexNetmodel\" we'll use later.\nStep6: Looking at the trainable parameters\nStep7: Train the model\nIn the interests of time, we'll limit the number of training batches to 800, and the number of testing batches to 300. We'll train the model on 8000 of 18743 available images, and test it on 3000 out of 6251 images.\nStep8: Save the trained model\nStep9: Evaluate model performance\nStep10: Download a pretrained model\nTorchvision has a number of proven models available through
\nNote that when we assign new layers, their parameters default to .requires_grad=True.\nStep13: Define loss function & optimizer\nWe only want to optimize the classifier parameters, as the feature parameters are frozen.\nStep14: Train the model\nRemember, we're only training the fully connected layers. The convolutional layers have fixed weights and biases. For this reason, we only need to run one epoch.\nStep15: Run a new image through the model\nWe can also pass a single image through the model to obtain a prediction.
\nPick a number from 0 to 6250, assign it to \"x\", and we'll use that value to select an image from the Cats and Dogs test set."},"code_prompt":{"kind":"string","value":"Python Code:\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, models # add models to the list\nfrom torchvision.utils import make_grid\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n# ignore harmless warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nExplanation: \n
\n
Copyright 2019. Created by Jose Marcial Portilla.
\nCNN on Custom Images\nFor this exercise we're using a collection of Cats and Dogs images inspired by the classic
Kaggle competition.\nIn the last section we downloaded the files, looked at the directory structure, examined the images, and performed a variety of transforms in preparation for training.\nIn this section we'll define our model, then feed images through a training and validation sequence using DataLoader.\nImage files directory tree\n
.\n└── Data\n    └── CATS_DOGS\n        ├── test\n        │   ├── CAT\n        │   │   ├── 9374.jpg\n        │   │   ├── 9375.jpg\n        │   │   └── ... (3,126 files)\n        │   └── DOG\n        │       ├── 9374.jpg\n        │       ├── 9375.jpg\n        │       └── ... (3,125 files)       \n        │           \n        └── train\n            ├── CAT\n            │   ├── 0.jpg\n            │   ├── 1.jpg\n            │   └── ... (9,371 files)\n            └── DOG\n                ├── 0.jpg\n                ├── 1.jpg\n                └── ... (9,372 files)
\nPerform standard imports\nEnd of explanation\ntrain_transform = transforms.Compose([\n transforms.RandomRotation(10), # rotate +/- 10 degrees\n transforms.RandomHorizontalFlip(), # reverse 50% of images\n transforms.Resize(224), # resize shortest side to 224 pixels\n transforms.CenterCrop(224), # crop longest side to 224 pixels at center\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\ntest_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\nExplanation: Define transforms\nIn the previous section we looked at a variety of transforms available for data augmentation (rotate, flip, etc.) and normalization.
\nHere we'll combine the ones we want, including the recommended normalization parameters for mean and std per channel.\nEnd of explanation\nroot = '../Data/CATS_DOGS'\ntrain_data = datasets.ImageFolder(os.path.join(root, 'train'), transform=train_transform)\ntest_data = datasets.ImageFolder(os.path.join(root, 'test'), transform=test_transform)\ntorch.manual_seed(42)\ntrain_loader = DataLoader(train_data, batch_size=10, shuffle=True)\ntest_loader = DataLoader(test_data, batch_size=10, shuffle=True)\nclass_names = train_data.classes\nprint(class_names)\nprint(f'Training images available: {len(train_data)}')\nprint(f'Testing images available: {len(test_data)}')\nExplanation: Prepare train and test sets, loaders\nWe're going to take advantage of a built-in torchvision dataset tool called ImageFolder.\nEnd of explanation\n# Grab the first batch of 10 images\nfor images,labels in train_loader: \n break\n# Print the labels\nprint('Label:', labels.numpy())\nprint('Class:', *np.array([class_names[i] for i in labels]))\nim = make_grid(images, nrow=5) # the default nrow is 8\n# Inverse normalize the images\ninv_normalize = transforms.Normalize(\n mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],\n std=[1/0.229, 1/0.224, 1/0.225]\n)\nim_inv = inv_normalize(im)\n# Print the images\nplt.figure(figsize=(12,4))\nplt.imshow(np.transpose(im_inv.numpy(), (1, 2, 0)));\nExplanation: Display a batch of images\nTo verify that the training loader selects cat and dog images at random, let's show a batch of loaded images.
\nRecall that imshow clips pixel values <0, so the resulting display lacks contrast. We'll apply a quick inverse transform to the input tensor so that images show their \"true\" colors.\nEnd of explanation\nclass ConvolutionalNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\n self.fc1 = nn.Linear(54*54*16, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 2)\n def forward(self, X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X, 2, 2)\n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X, 2, 2)\n X = X.view(-1, 54*54*16)\n X = F.relu(self.fc1(X))\n X = F.relu(self.fc2(X))\n X = self.fc3(X)\n return F.log_softmax(X, dim=1)\nExplanation: Define the model\nWe'll start by using a model similar to the one we applied to the CIFAR-10 dataset, except that here we have a binary classification (2 output channels, not 10). Also, we'll add another set of convolution/pooling layers.\nEnd of explanation\ntorch.manual_seed(101)\nCNNmodel = ConvolutionalNetwork()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)\nCNNmodel\nExplanation:
Why (54x54x16)?
\nWith 224 pixels per side, the kernels and pooling layers result in $\\;(((224-2)/2)-2)/2 = 54.5\\;$ which rounds down to 54 pixels per side.
\nInstantiate the model, define loss and optimization functions\nWe're going to call our model \"CNNmodel\" to differentiate it from an \"AlexNetmodel\" we'll use later.\nEnd of explanation\ndef count_parameters(model):\n params = [p.numel() for p in model.parameters() if p.requires_grad]\n for item in params:\n print(f'{item:>8}')\n print(f'________\\n{sum(params):>8}')\ncount_parameters(CNNmodel)\nExplanation: Looking at the trainable parameters\nEnd of explanation\nimport time\nstart_time = time.time()\nepochs = 3\nmax_trn_batch = 800\nmax_tst_batch = 300\ntrain_losses = []\ntest_losses = []\ntrain_correct = []\ntest_correct = []\nfor i in range(epochs):\n trn_corr = 0\n tst_corr = 0\n \n # Run the training batches\n for b, (X_train, y_train) in enumerate(train_loader):\n \n # Limit the number of batches\n if b == max_trn_batch:\n break\n b+=1\n \n # Apply the model\n y_pred = CNNmodel(X_train)\n loss = criterion(y_pred, y_train)\n \n # Tally the number of correct predictions\n predicted = torch.max(y_pred.data, 1)[1]\n batch_corr = (predicted == y_train).sum()\n trn_corr += batch_corr\n \n # Update parameters\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # Print interim results\n if b%200 == 0:\n print(f'epoch: {i:2} batch: {b:4} [{10*b:6}/8000] loss: {loss.item():10.8f} \\\naccuracy: {trn_corr.item()*100/(10*b):7.3f}%')\n train_losses.append(loss)\n train_correct.append(trn_corr)\n # Run the testing batches\n with torch.no_grad():\n for b, (X_test, y_test) in enumerate(test_loader):\n # Limit the number of batches\n if b == max_tst_batch:\n break\n # Apply the model\n y_val = CNNmodel(X_test)\n # Tally the number of correct predictions\n predicted = torch.max(y_val.data, 1)[1] \n tst_corr += (predicted == y_test).sum()\n loss = criterion(y_val, y_test)\n test_losses.append(loss)\n test_correct.append(tst_corr)\nprint(f'\\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed\nExplanation: Train the model\nIn the interests of time, we'll limit the number of training batches to 800, and the number of testing batches to 300. We'll train the model on 8000 of 18743 available images, and test it on 3000 out of 6251 images.\nEnd of explanation\ntorch.save(CNNmodel.state_dict(), 'CustomImageCNNModel.pt')\nExplanation: Save the trained model\nEnd of explanation\nplt.plot(train_losses, label='training loss')\nplt.plot(test_losses, label='validation loss')\nplt.title('Loss at the end of each epoch')\nplt.legend();\nplt.plot([t/80 for t in train_correct], label='training accuracy')\nplt.plot([t/30 for t in test_correct], label='validation accuracy')\nplt.title('Accuracy at the end of each epoch')\nplt.legend();\nprint(test_correct)\nprint(f'Test accuracy: {test_correct[-1].item()*100/3000:.3f}%')\nExplanation: Evaluate model performance\nEnd of explanation\nAlexNetmodel = models.alexnet(pretrained=True)\nAlexNetmodel\nExplanation: Download a pretrained model\nTorchvision has a number of proven models available through torchvision.models:\n\nThese have all been trained on the ImageNet database of images. Our only task is to reduce the output of the fully connected layers from (typically) 1000 categories to just 2.\nTo access the models, you can construct a model with random weights by calling its constructor:
\n
resnet18 = models.resnet18()
\nYou can also obtain a pre-trained model by passing pretrained=True:
\n
resnet18 = models.resnet18(pretrained=True)
\nAll pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded in to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].\nFeel free to investigate the different models available. Each one will be downloaded to a cache directory the first time they're accessed - from then on they'll be available locally.\nFor its simplicity and effectiveness, we'll use AlexNet:\nEnd of explanation\nfor param in AlexNetmodel.parameters():\n param.requires_grad = False\nExplanation:
This model uses torch.nn.AdaptiveAvgPool2d(output_size) to convert the large matrix coming out of the convolutional layers to a (6x6)x256 matrix being fed into the fully connected layers.
\nFreeze feature parameters\nWe want to freeze the pre-trained weights & biases. We set .requires_grad to False so we don't backprop through them.\nEnd of explanation\ntorch.manual_seed(42)\nAlexNetmodel.classifier = nn.Sequential(nn.Linear(9216, 1024),\n nn.ReLU(),\n nn.Dropout(0.4),\n nn.Linear(1024, 2),\n nn.LogSoftmax(dim=1))\nAlexNetmodel\n# These are the TRAINABLE parameters:\ncount_parameters(AlexNetmodel)\nExplanation: Modify the classifier\nNext we need to modify the fully connected layers to produce a binary output. The section is labeled \"classifier\" in the AlexNet model.
\nNote that when we assign new layers, their parameters default to .requires_grad=True.\nEnd of explanation\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(AlexNetmodel.classifier.parameters(), lr=0.001)\nExplanation: Define loss function & optimizer\nWe only want to optimize the classifier parameters, as the feature parameters are frozen.\nEnd of explanation\nimport time\nstart_time = time.time()\nepochs = 1\nmax_trn_batch = 800\nmax_tst_batch = 300\ntrain_losses = []\ntest_losses = []\ntrain_correct = []\ntest_correct = []\nfor i in range(epochs):\n trn_corr = 0\n tst_corr = 0\n \n # Run the training batches\n for b, (X_train, y_train) in enumerate(train_loader):\n if b == max_trn_batch:\n break\n b+=1\n \n # Apply the model\n y_pred = AlexNetmodel(X_train)\n loss = criterion(y_pred, y_train)\n \n # Tally the number of correct predictions\n predicted = torch.max(y_pred.data, 1)[1]\n batch_corr = (predicted == y_train).sum()\n trn_corr += batch_corr\n \n # Update parameters\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # Print interim results\n if b%200 == 0:\n print(f'epoch: {i:2} batch: {b:4} [{10*b:6}/8000] loss: {loss.item():10.8f} \\\naccuracy: {trn_corr.item()*100/(10*b):7.3f}%')\n train_losses.append(loss)\n train_correct.append(trn_corr)\n # Run the testing batches\n with torch.no_grad():\n for b, (X_test, y_test) in enumerate(test_loader):\n if b == max_tst_batch:\n break\n # Apply the model\n y_val = AlexNetmodel(X_test)\n # Tally the number of correct predictions\n predicted = torch.max(y_val.data, 1)[1] \n tst_corr += (predicted == y_test).sum()\n loss = criterion(y_val, y_test)\n test_losses.append(loss)\n test_correct.append(tst_corr)\nprint(f'\\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed\nprint(test_correct)\nprint(f'Test accuracy: {test_correct[-1].item()*100/3000:.3f}%')\nExplanation: Train the model\nRemember, we're only training the fully connected layers. The convolutional layers have fixed weights and biases. For this reason, we only need to run one epoch.\nEnd of explanation\nx = 2019\nim = inv_normalize(test_data[x][0])\nplt.imshow(np.transpose(im.numpy(), (1, 2, 0)));\ntest_data[x][0].shape\n# CNN Model Prediction:\nCNNmodel.eval()\nwith torch.no_grad():\n new_pred = CNNmodel(test_data[x][0].view(1,3,224,224)).argmax()\nprint(f'Predicted value: {new_pred.item()} {class_names[new_pred.item()]}')\n# AlexNet Model Prediction:\nAlexNetmodel.eval()\nwith torch.no_grad():\n new_pred = AlexNetmodel(test_data[x][0].view(1,3,224,224)).argmax()\nprint(f'Predicted value: {new_pred.item()} {class_names[new_pred.item()]}')\nExplanation: Run a new image through the model\nWe can also pass a single image through the model to obtain a prediction.
\nPick a number from 0 to 6250, assign it to \"x\", and we'll use that value to select an image from the Cats and Dogs test set.\nEnd of explanation"}}},{"rowIdx":2169,"cells":{"Unnamed: 0":{"kind":"number","value":2169,"string":"2,169"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Alternate PowerShell Hosts\nMetadata\n| Metadata | Value |\n|\nStep1: Download & Process Security Dataset\nStep2: Analytic I\nWithin the classic PowerShell log, event ID 400 indicates when a new PowerShell host process has started. Excluding PowerShell.exe is a good way to find alternate PowerShell hosts\n| Data source | Event Provider | Relationship | Event |\n|\nStep3: Analytic II\nLooking for processes loading a specific PowerShell DLL is a very effective way to document the use of PowerShell in your environment\n| Data source | Event Provider | Relationship | Event |\n|\nStep4: Analytic III\nMonitoring for PSHost* pipes is another interesting way to find other alternate PowerShell hosts in your environment.\n| Data source | Event Provider | Relationship | Event |\n|"},"code_prompt":{"kind":"string","value":"Python Code:\nfrom openhunt.mordorutils import *\nspark = get_spark()\nExplanation: Alternate PowerShell Hosts\nMetadata\n| Metadata | Value |\n|:------------------|:---|\n| collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] |\n| creation date | 2019/08/15 |\n| modification date | 2020/09/20 |\n| playbook related | ['WIN-190410151110'] |\nHypothesis\nAdversaries might be leveraging alternate PowerShell Hosts to execute PowerShell evading traditional PowerShell detections that look for powershell.exe in my environment.\nTechnical Context\nNone\nOffensive Tradecraft\nAdversaries can abuse alternate signed PowerShell Hosts to evade application whitelisting solutions that block powershell.exe and naive logging based upon traditional PowerShell hosts.\nCharacteristics of a PowerShell host (Matt Graeber @mattifestation) >\n* These binaries are almost always C#/.NET .exes/.dlls\n* These binaries have System.Management.Automation.dll as a referenced assembly\n* These may not always be \"built in\" binaries\nSecurity Datasets\n| Metadata | Value |\n|:----------|:----------|\n| docs | https://securitydatasets.com/notebooks/atomic/windows/execution/SDWIN-190518211456.html |\n| link | https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/lateral_movement/host/empire_psremoting_stager.zip |\nAnalytics\nInitialize Analytics Engine\nEnd of explanation\nsd_file = \"https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/lateral_movement/host/empire_psremoting_stager.zip\"\nregisterMordorSQLTable(spark, sd_file, \"sdTable\")\nExplanation: Download & Process Security Dataset\nEnd of explanation\ndf = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Channel\nFROM sdTable\nWHERE (Channel = \"Microsoft-Windows-PowerShell/Operational\" OR Channel = \"Windows PowerShell\")\n AND (EventID = 400 OR EventID = 4103)\n AND NOT Message LIKE \"%Host Application%powershell%\"\n'''\n)\ndf.show(10,False)\nExplanation: Analytic I\nWithin the classic PowerShell log, event ID 400 indicates when a new PowerShell host process has started. Excluding PowerShell.exe is a good way to find alternate PowerShell hosts\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Powershell | Windows PowerShell | Application host started | 400 |\n| Powershell | Microsoft-Windows-PowerShell/Operational | User started Application host | 4103 |\nEnd of explanation\ndf = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Image, Description\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 7\n AND (lower(Description) = \"system.management.automation\" OR lower(ImageLoaded) LIKE \"%system.management.automation%\")\n AND NOT Image LIKE \"%powershell.exe\"\n'''\n)\ndf.show(10,False)\nExplanation: Analytic II\nLooking for processes loading a specific PowerShell DLL is a very effective way to document the use of PowerShell in your environment\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Module | Microsoft-Windows-Sysmon/Operational | Process loaded Dll | 7 |\nEnd of explanation\ndf = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Image, PipeName\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 17\n AND lower(PipeName) LIKE \"\\\\\\pshost%\"\n AND NOT Image LIKE \"%powershell.exe\"\n'''\n)\ndf.show(10,False)\nExplanation: Analytic III\nMonitoring for PSHost* pipes is another interesting way to find other alternate PowerShell hosts in your environment.\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Named pipe | Microsoft-Windows-Sysmon/Operational | Process created Pipe | 17 |\nEnd of explanation"}}},{"rowIdx":2170,"cells":{"Unnamed: 0":{"kind":"number","value":2170,"string":"2,170"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Chapter 10\nLists\nA sequence of elements of any type.\nStep1: Lists are mutable while strings are immutable. We can never change a string, only reassign it to something else.\nStep2: Some common list procedures\nStep3: list1 and list2 are equivalent (same values) but not identical (same object). In order to make these two lists identical we can alias the object.\nStep4: Now both names/variables point at the same object (reference the same object).\nStep5: Let's try to change b by assigning to a (they reference the same object after all)\nStep6: What happened is that we have reassigned a to a new object, that is they no longer point at the same object."},"code_prompt":{"kind":"string","value":"Python Code:\nL = [1,2,3]\nM = ['a', 'b', 'c']\nN = [1, 'a', 2, [32, 64]]\nExplanation: Chapter 10\nLists\nA sequence of elements of any type.\nEnd of explanation\nS = 'abc'\n#S[1] = 'z' # <== Doesn't work!\nL = ['a', 'b', 'c']\nL[1] = 'z'\nprint L\nExplanation: Lists are mutable while strings are immutable. We can never change a string, only reassign it to something else.\nEnd of explanation\na = 23\nb = 23\na is b\nlist1 = [1,2,3]\nlist2 = [1,2,3]\nlist1 is list2\nExplanation: Some common list procedures:\nreduce\nConvert a sequence (eg list) into a single element. Examples: sum, mean\nmap\nApply some function to each element of a sequence. Examples: making every element in a list positive, capitalizing all elements of a list\nfilter\nSelecting some elements of a sequence according to some condition. Examples: selecting only positive numbers from a list, selecting only elements of a list of strings that have length greater than 10.\nEverything in Python is an object. Think of an object as the underlying data. Objects have individuality. For example,\nEnd of explanation\nlist2 = list1\nlist1 is list2\nExplanation: list1 and list2 are equivalent (same values) but not identical (same object). In order to make these two lists identical we can alias the object.\nEnd of explanation\nlist1[0] = 1234\nprint list1\nprint list2\nBack to the strings,\nb = 'abc'\na = b\na is b\nExplanation: Now both names/variables point at the same object (reference the same object).\nEnd of explanation\na = 'xyz'\nprint a\nprint b\nExplanation: Let's try to change b by assigning to a (they reference the same object after all)\nEnd of explanation\na is b\nid(b)\nExplanation: What happened is that we have reassigned a to a new object, that is they no longer point at the same object.\nEnd of explanation"}}},{"rowIdx":2171,"cells":{"Unnamed: 0":{"kind":"number","value":2171,"string":"2,171"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Encontro 02, Parte 1\nStep1: Configurando a biblioteca\nA socnet disponibiliza variáveis de módulo que permitem configurar propriedades visuais. Os nomes são auto-explicativos e os valores abaixo são padrão.\nStep2: Uma variável de cor armazena uma tupla contendo três inteiros entre 0 e 255 que representam intensidades de vermelho, verde e azul respectivamente.\nUma variável de posição armazena uma string contendo duas palavras separadas por um espaço\nStep3: Abra esses arquivos em um editor de texto e note como o formato é auto-explicativo.\nVisualizando grafos\nVamos visualizar o primeiro grafo, que é não-dirigido\nStep4: Essa é a representação mais comum de grafos não-dirigidos\nStep5: Essa é a representação mais comum de grafos dirigidos\nStep6: Cada aresta também é asssociada a um dicionário que armazena seus atributos. Vamos modificar e imprimir o atributo color da aresta ${1, 2}$ do grafo ug. Esse atributo existe por padrão.\nStep7: Note que a ordem dos nós não importa, pois ug é um grafo não-dirigido.\nStep8: Os atributos color são exibidos na visualização.\nStep9: Podemos usar funções de conveniência para reinicializar as cores.\nStep10: Os atributos label também podem ser exibidos na visualização, mas não existem por padrão. Primeiramente, precisamos criá-los.\nStep11: Depois, precisamos usar os argumentos nlab e elab para indicar que queremos exibi-los. Esses argumentos são False por padrão.\nStep12: Vizinhos, predecessores e sucessores\nConsidere um grafo $(N, E)$ e um nó $n$. Suponha que esse grafo é não-dirigido.\nNesse caso, dizemos que $n$ é vizinho (neighbor) de $m$ se ${n, m} \\in E$. Denotamos por $\\mathcal{N}(n)$ o conjunto dos vizinhos de $n$.\nStep13: Suponha agora que o grafo $(N, E)$ é dirigido.\nNesse caso, dizemos que $n$ é predecessor de $m$ se $(n, m) \\in E$ e dizemos que $n$ é sucessor de $m$ se $(m, n) \\in E$. Denotamos por $\\mathcal{P}(n)$ o conjunto dos predecessores de $n$ e denotamos por $\\mathcal{S}(n)$ o conjunto dos sucessores de $n$.\nStep14: Passeios, trilhas e caminhos\nSe $(N, E)$ é um grafo não-dirigido\nStep15: Exercício 6\nUse cores para dar um exemplo de caminho no grafo dg.\nStep16: Posicionamento dos nós\nPara encerrar, vamos carregar o grafo do encontro anterior. O próprio arquivo atribui label aos nós, portanto não é necessário criá-los.\nStep17: Usamos o argumento has_pos para indicar que os atributos x e y devem ser usados para posicionar os nós. Esse argumento é False por padrão, pois nem todo arquivo atribui essas coordenadas.\nSe elas não forem usadas, a visualização usa um tipo de force-directed graph drawing."},"code_prompt":{"kind":"string","value":"Python Code:\nimport sys\nsys.path.append('..')\nimport socnet as sn\nExplanation: Encontro 02, Parte 1: Revisão de Grafos\nEste guia foi escrito para ajudar você a atingir os seguintes objetivos:\nformalizar conceitos básicos de teoria dos grafos;\nusar funcionalidades básicas da biblioteca da disciplina.\nGrafos não-dirigidos\nUm grafo não-dirigido (undirected graph) é um par\n$(N, E)$,\nonde $N$ é um conjunto qualquer e $E$ é um conjunto de pares não-ordenados de elementos de $N$, ou seja,\n$E \\subseteq {{n, m} \\colon n \\in N \\textrm{ e } m \\in N}$.\nUm elemento de $N$ chama-se nó (node) e um elemento de $E$ chama-se aresta (edge). Em alguns trabalhos, usa-se $V$ e vértice em vez de $N$ e nó.\nGrafos dirigidos\nFormalmente, um grafo dirigido (directed graph) é um par\n$(N, E)$,\nonde $N$ é um conjunto qualquer e $E$ é um conjunto de pares ordenados de elementos de N, ou seja,\n$E \\subseteq {(n, m) \\colon n \\in N \\textrm{ e } m \\in N}$.\nUm elemento de $N$ chama-se nó (node) e um elemento de $E$ chama-se aresta (edge). Em alguns trabalhos, usa-se $V$ e vértice em vez de $N$ e nó e usa-se $A$ e arco em vez de $E$ e aresta.\nInstalando as dependências\nAntes de continuar, instale as duas dependências da biblioteca da disciplina:\npip install networkx plotly\nEm algumas distribuições Linux você deve usar o comando pip3, pois o comando pip está associado a Python 2 por padrão.\nImportando a biblioteca\nNão mova ou renomeie os arquivos do repositório, a menos que você esteja disposto a adaptar os notebooks de acordo.\nVamos importar a biblioteca da disciplina no notebook:\nEnd of explanation\nsn.graph_width = 800\nsn.graph_height = 450\nsn.node_size = 20\nsn.node_color = (255, 255, 255)\nsn.edge_width = 2\nsn.edge_color = (0, 0, 0)\nsn.node_label_position = 'middle center'\nsn.edge_label_distance = 10\nExplanation: Configurando a biblioteca\nA socnet disponibiliza variáveis de módulo que permitem configurar propriedades visuais. Os nomes são auto-explicativos e os valores abaixo são padrão.\nEnd of explanation\nug = sn.load_graph('5-kruskal.gml', has_pos=True)\ndg = sn.load_graph('4-dijkstra.gml', has_pos=True)\nExplanation: Uma variável de cor armazena uma tupla contendo três inteiros entre 0 e 255 que representam intensidades de vermelho, verde e azul respectivamente.\nUma variável de posição armazena uma string contendo duas palavras separadas por um espaço:\n* a primeira representa o alinhamento vertical e pode ser top, middle ou bottom;\n* a segunda representa o alinhamento horizontal e pode ser left, center ou right.\nCarregando grafos\nVamos carregar dois grafos no formato GML:\nEnd of explanation\nsn.graph_width = 320\nsn.graph_height = 180\nsn.show_graph(ug)\nExplanation: Abra esses arquivos em um editor de texto e note como o formato é auto-explicativo.\nVisualizando grafos\nVamos visualizar o primeiro grafo, que é não-dirigido:\nEnd of explanation\nsn.graph_width = 320\nsn.graph_height = 180\nsn.show_graph(dg)\nExplanation: Essa é a representação mais comum de grafos não-dirigidos: círculos como nós e retas como arestas. Se uma reta conecta o círculo que representa $n$ ao círculo que representa $m$, ela representa a aresta ${n, m}$.\nVamos agora visualizar o segundo grafo, que é dirigido:\nEnd of explanation\nug.node[0]['color'] = (0, 0, 255)\nprint(ug.node[0]['color'])\nExplanation: Essa é a representação mais comum de grafos dirigidos: círculos como nós e setas como arestas. Se uma seta sai do círculo que representa $n$ e entra no círculo que representa $m$, ela representa a aresta $(n, m)$.\nNote que as duas primeiras linhas não são necessárias se você rodou a célula anterior, pois os valores atribuídos a graph_width e graph_height são exatamente iguais.\nAtributos de nós e arestas\nNa estrutura de dados usada pela socnet, os nós são inteiros e cada nó é asssociado a um dicionário que armazena seus atributos. Vamos modificar e imprimir o atributo color do nó $0$ do grafo ug. Esse atributo existe por padrão.\nEnd of explanation\nug.edge[1][2]['color'] = (0, 255, 0)\nprint(ug.edge[1][2]['color'])\nExplanation: Cada aresta também é asssociada a um dicionário que armazena seus atributos. Vamos modificar e imprimir o atributo color da aresta ${1, 2}$ do grafo ug. Esse atributo existe por padrão.\nEnd of explanation\nug.edge[2][1]['color'] = (255, 0, 255)\nprint(ug.edge[1][2]['color'])\nExplanation: Note que a ordem dos nós não importa, pois ug é um grafo não-dirigido.\nEnd of explanation\nsn.show_graph(ug)\nExplanation: Os atributos color são exibidos na visualização.\nEnd of explanation\nsn.reset_node_colors(ug)\nsn.reset_edge_colors(ug)\nsn.show_graph(ug)\nExplanation: Podemos usar funções de conveniência para reinicializar as cores.\nEnd of explanation\nfor n in ug.nodes():\n ug.node[n]['label'] = str(n)\nfor n, m in ug.edges():\n ug.edge[n][m]['label'] = '?'\nfor n in dg.nodes():\n dg.node[n]['label'] = str(n)\nfor n, m in dg.edges():\n dg.edge[n][m]['label'] = '?'\nExplanation: Os atributos label também podem ser exibidos na visualização, mas não existem por padrão. Primeiramente, precisamos criá-los.\nEnd of explanation\nsn.show_graph(ug, nlab=True, elab=True)\nsn.show_graph(dg, nlab=True, elab=True)\nExplanation: Depois, precisamos usar os argumentos nlab e elab para indicar que queremos exibi-los. Esses argumentos são False por padrão.\nEnd of explanation\nprint(ug.neighbors(0))\nExplanation: Vizinhos, predecessores e sucessores\nConsidere um grafo $(N, E)$ e um nó $n$. Suponha que esse grafo é não-dirigido.\nNesse caso, dizemos que $n$ é vizinho (neighbor) de $m$ se ${n, m} \\in E$. Denotamos por $\\mathcal{N}(n)$ o conjunto dos vizinhos de $n$.\nEnd of explanation\nprint(dg.successors(0))\nprint(dg.predecessors(1))\nExplanation: Suponha agora que o grafo $(N, E)$ é dirigido.\nNesse caso, dizemos que $n$ é predecessor de $m$ se $(n, m) \\in E$ e dizemos que $n$ é sucessor de $m$ se $(m, n) \\in E$. Denotamos por $\\mathcal{P}(n)$ o conjunto dos predecessores de $n$ e denotamos por $\\mathcal{S}(n)$ o conjunto dos sucessores de $n$.\nEnd of explanation\nug.node[0]['color'] = (0, 0, 255)\nug.node[1]['color'] = (0, 0, 255)\nug.node[2]['color'] = (0, 0, 255)\nug.node[3]['color'] = (0, 0, 255)\nug.node[4]['color'] = (0, 0, 255)\nug.node[5]['color'] = (0, 0, 255)\nug.edge[0][1]['color'] = (0, 255, 0)\nug.edge[1][2]['color'] = (0, 255, 0)\nug.edge[2][3]['color'] = (0, 255, 0)\nug.edge[3][4]['color'] = (0, 255, 0)\nug.edge[4][5]['color'] = (0, 255, 0)\nsn.show_graph(ug)\nExplanation: Passeios, trilhas e caminhos\nSe $(N, E)$ é um grafo não-dirigido:\num passeio (walk) é uma sequência de nós $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ tal que, para todo $i$ entre $0$ e $k-2$, temos que ${n_i, n_{i + 1}} \\in E$;\numa trilha (trail) é um passeio $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ no qual não existem índices $i$ e $j$ entre $0$ e $k-2$ tais que $i \\neq j$ e ${n_i, n_{i+1}} = {n_j, n_{j+1}}$;\num caminho (path) é um passeio $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ no qual não existem índices $i$ e $j$ entre $0$ e $k-1$ tais que $i \\neq j$ e $n_i = n_j$.\nSe $(N, E)$ é um grafo dirigido:\num passeio (walk) é uma sequência de nós $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ tal que, para todo $i$ entre $0$ e $k-2$, temos que $(n_i, n_{i + 1}) \\in E$;\numa trilha (trail) é um passeio $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ no qual não existem índices $i$ e $j$ entre $0$ e $k-2$ tais que $i \\neq j$ e $(n_i, n_{i+1}) = (n_j, n_{j+1})$;\num caminho (path) é um passeio $\\langle n_0, n_1, \\ldots, n_{k-1} \\rangle$ no qual não existem índices $i$ e $j$ entre $0$ e $k-1$ tais que $i \\neq j$ e $n_i = n_j$.\nPode-se dizer que uma trilha é um passeio que não repete arestas e um caminho é um passeio que não repete nós.\nExercício 1\nDê um exemplo de passeio que não é trilha no grafo ug.\nUm passeio que não é trilha é o seguinte:\n - 0, 1, 7, 8, 6, 7, 1, 0\nExercício 2\nDê um exemplo de passeio que não é trilha no grafo dg.\nUm exemplo de passeio que não é trilha é o seguinte:\n - 0, 1, 3, 4, 0, 1, 2\nExercício 3\nDê um exemplo de trilha que não é caminho no grafo ug.\nUm exemplo de trilha que não é caminho é o seguinte:\n - 0, 1, 2, 5, 6, 8, 2, 3, 4, 5, 3\nExercício 4\nDê um exemplo de trilha que não é caminho no grafo dg.\nUm exemplo de trilha que não é caminho é o seguinte:\n - 0, 1, 3, 2, 4, 2\nExercício 5\nUse cores para dar um exemplo de caminho no grafo ug.\nEnd of explanation\ndg.node[0]['color'] = (0, 0, 255)\ndg.edge[0][1]['color'] = (0, 255, 0)\ndg.node[1]['color'] = (0, 0, 255)\ndg.edge[1][3]['color'] = (0, 255, 0)\ndg.node[3]['color'] = (0, 0, 255)\ndg.edge[3][2]['color'] = (0, 255, 0)\ndg.node[2]['color'] = (0, 0, 255)\ndg.edge[2][4]['color'] = (0, 255, 0)\ndg.node[4]['color'] = (0, 0, 255)\nsn.show_graph(dg)\nExplanation: Exercício 6\nUse cores para dar um exemplo de caminho no grafo dg.\nEnd of explanation\nsn.graph_width = 450\nsn.graph_height = 450\nsn.node_label_position = 'hover' # easter egg!\ng = sn.load_graph('1-introducao.gml', has_pos=True)\nsn.show_graph(g, nlab=True)\nExplanation: Posicionamento dos nós\nPara encerrar, vamos carregar o grafo do encontro anterior. O próprio arquivo atribui label aos nós, portanto não é necessário criá-los.\nEnd of explanation\ng = sn.load_graph('1-introducao.gml')\nsn.show_graph(g, nlab=True)\nExplanation: Usamos o argumento has_pos para indicar que os atributos x e y devem ser usados para posicionar os nós. Esse argumento é False por padrão, pois nem todo arquivo atribui essas coordenadas.\nSe elas não forem usadas, a visualização usa um tipo de force-directed graph drawing.\nEnd of explanation"}}},{"rowIdx":2172,"cells":{"Unnamed: 0":{"kind":"number","value":2172,"string":"2,172"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n EventVestor\nStep1: Let's go over the columns\nStep2: Finally, suppose we want the above as a DataFrame"},"code_prompt":{"kind":"string","value":"Python Code:\n# import the dataset\nfrom quantopian.interactive.data.eventvestor import contract_win\n# or if you want to import the free dataset, use:\n# from quantopian.data.eventvestor import contract_win_free\n# import data operations\nfrom odo import odo\n# import other libraries we will use\nimport pandas as pd\n# Let's use blaze to understand the data a bit using Blaze dshape()\ncontract_win.dshape\n# And how many rows are there?\n# N.B. we're using a Blaze function to do this, not len()\ncontract_win.count()\n# Let's see what the data looks like. We'll grab the first three rows.\ncontract_win[:3]\nExplanation: EventVestor: Contract Wins\nIn this notebook, we'll take a look at EventVestor's Contract Wins dataset, available on the Quantopian Store. This dataset spans January 01, 2007 through the current day, and documents major contract wins by companies.\nBlaze\nBefore we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as Blaze. Blaze provides the Quantopian user with a convenient interface to access very large datasets.\nBlaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side.\nIt is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization.\nHelpful links:\n* Query building for Blaze\n* Pandas-to-Blaze dictionary\n* SQL-to-Blaze dictionary.\nOnce you've limited the size of your Blaze object, you can convert it to a Pandas DataFrames using:\nfrom odo import odo\nodo(expr, pandas.DataFrame)\nFree samples and limits\nOne other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze.\nThere is a free version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day.\nWith preamble in place, let's get started:\nEnd of explanation\nba_sid = symbols('BA').sid\nwins = contract_win[contract_win.sid == ba_sid][['timestamp', 'contract_amount','amount_units','contract_entity']].sort('timestamp')\n# When displaying a Blaze Data Object, the printout is automatically truncated to ten rows.\nwins\nExplanation: Let's go over the columns:\n- event_id: the unique identifier for this contract win.\n- asof_date: EventVestor's timestamp of event capture.\n- trade_date: for event announcements made before trading ends, trade_date is the same as event_date. For announcements issued after market close, trade_date is next market open day.\n- symbol: stock ticker symbol of the affected company.\n- event_type: this should always be Contract Win.\n- contract_amount: the amount of amount_units the contract is for.\n- amount_units: the currency or other units for the value of the contract. Most commonly in millions of dollars.\n- contract_entity: name of the customer, if available\n- event_rating: this is always 1. The meaning of this is uncertain.\n- timestamp: this is our timestamp on when we registered the data.\n- sid: the equity's unique identifier. Use this instead of the symbol.\nWe've done much of the data processing for you. Fields like timestamp and sid are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the sid across all our equity databases.\nWe can select columns and rows with ease. Below, we'll fetch all contract wins by Boeing. We'll display only the contract_amount, amount_units, contract_entity, and timestamp. We'll sort by date.\nEnd of explanation\nba_df = odo(wins, pd.DataFrame)\n# Printing a pandas DataFrame displays the first 30 and last 30 items, and truncates the middle.\nba_df\nExplanation: Finally, suppose we want the above as a DataFrame:\nEnd of explanation"}}},{"rowIdx":2173,"cells":{"Unnamed: 0":{"kind":"number","value":2173,"string":"2,173"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Theory and Practice of Visualization Exercise 1\nImports\nStep1: Graphical excellence and integrity\nFind a data-focused visualization on one of the following websites that is a positive example of the principles that Tufte describes in The Visual Display of Quantitative Information.\nVox\nUpshot\n538\nBuzzFeed\nUpload the image for the visualization to this directory and display the image inline in this notebook."},"code_prompt":{"kind":"string","value":"Python Code:\nfrom IPython.display import Image\nExplanation: Theory and Practice of Visualization Exercise 1\nImports\nEnd of explanation\n# Add your filename and uncomment the following line:\nImage(filename='alcohol-consumption-by-country-pure-alcohol-consumption-per-drinker-2010_chartbuilder-1.png')\nExplanation: Graphical excellence and integrity\nFind a data-focused visualization on one of the following websites that is a positive example of the principles that Tufte describes in The Visual Display of Quantitative Information.\nVox\nUpshot\n538\nBuzzFeed\nUpload the image for the visualization to this directory and display the image inline in this notebook.\nEnd of explanation"}}},{"rowIdx":2174,"cells":{"Unnamed: 0":{"kind":"number","value":2174,"string":"2,174"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n T81-558\nStep1: Toolkit\nStep2: Binary Classification\nBinary classification is used to create a model that classifies between only two classes. These two classes are often called \"positive\" and \"negative\". Consider the following program that uses the wcbreast_wdbc dataset to classify if a breast tumor is cancerous (malignant) or not (benign). The iris dataset is not binary, because there are three classes (3 types of iris).\nStep3: Confusion Matrix\nThe confusion matrix is a common visualization for both binary and larger classification problems. Often a model will have difficulty differentiating between two classes. For example, a neural network might be really good at telling the difference between cats and dogs, but not so good at telling the difference between dogs and wolves. The following code generates a confusion matrix\nStep4: The above two confusion matrixes show the same network. The bottom (normalized) is the type you will normally see. Notice the two labels. The label \"B\" means benign (no cancer) and the label \"M\" means malignant (cancer). The left-right (x) axis are the predictions, the top-bottom) are the expected outcomes. A perfect model (that never makes an error) has a dark blue diagonal that runs from top-left to bottom-right. \nTo read, consider the top-left square. This square indicates \"true labeled\" of B and also \"predicted label\" of B. This is good! The prediction matched the truth. The blueness of this box represents how often \"B\" is classified correct. It is not darkest blue. This is because the square to the right(which is off the perfect diagonal) has some color. This square indicates truth of \"B\" but prediction of \"M\". The white square, at the bottom-left, indicates a true of \"M\" but predicted of \"B\". The whiteness indicates this rarely happens. \nYour conclusion from the above chart is that the model sometimes classifies \"B\" as \"M\" (a false negative), but never mis-classifis \"M\" as \"B\". Always look for the dark diagonal, this is good!\nROC Curves\nROC curves can be a bit confusing. However, they are very common. It is important to know how to read them. Even their name is confusing. Do not worry about their name, it comes from electrical engineering (EE).\nBinary classification is common in medical testing. Often you want to diagnose if someone has a disease. This can lead to two types of errors, know as false positives and false negatives\nStep5: Classification\nWe've already seen multi-class classification, with the iris dataset. Confusion matrixes work just fine with 3 classes. The following code generates a confusion matrix for iris.\nStep6: See the strong diagonal? Iris is easy. See the light blue near the bottom? Sometimes virginica is confused for versicolor.\nRegression\nWe've already seen regression with the MPG dataset. Regression uses its own set of visualizations, one of the most common is the lift chart. The following code generates a lift chart."},"code_prompt":{"kind":"string","value":"Python Code:\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)\ndef encode_text_dummy(df,name):\n dummies = pd.get_dummies(df[name])\n for x in dummies.columns:\n dummy_name = \"{}-{}\".format(name,x)\n df[dummy_name] = dummies[x]\n df.drop(name, axis=1, inplace=True)\n# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).\ndef encode_text_index(df,name):\n le = preprocessing.LabelEncoder()\n df[name] = le.fit_transform(df[name])\n return le.classes_\n# Encode a numeric column as zscores\ndef encode_numeric_zscore(df,name,mean=None,sd=None):\n if mean is None:\n mean = df[name].mean()\n if sd is None:\n sd = df[name].std()\n df[name] = (df[name]-mean)/sd\n# Convert all missing values in the specified column to the median\ndef missing_median(df, name):\n med = df[name].median()\n df[name] = df[name].fillna(med)\n# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs\ndef to_xy(df,target):\n result = []\n for x in df.columns:\n if x != target:\n result.append(x)\n # find out the type of the target column. Is it really this hard? :(\n target_type = df[target].dtypes\n target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type\n \n # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.\n if target_type in (np.int64, np.int32):\n # Classification\n return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.int32)\n else:\n # Regression\n return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.float32)\n \n# Nicely formatted time string\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\nExplanation: T81-558: Applications of Deep Neural Networks\nClass 4: Classification and Regression\n* Instructor: Jeff Heaton, School of Engineering and Applied Science, Washington University in St. Louis\n* For more information visit the class website.\nBinary Classification, Classification and Regression\nBinary Classification - Classification between two possibilities (positive and negative). Common in medical testing, does the person have the disease (positive) or not (negative).\nClassification - Classification between more than 2. The iris dataset (3-way classification).\nRegression - Numeric prediction. How many MPG does a car get?\nIn this class session we will look at some visualizations for all three.\nFeature Vector Encoding\nThese are exactly the same feature vector encoding functions from Class 3. They must be defined for this class as well. For more information, refer to class 3.\nEnd of explanation\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n# Plot a confusion matrix.\n# cm is the confusion matrix, names are the names of the classes.\ndef plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(names))\n plt.xticks(tick_marks, names, rotation=45)\n plt.yticks(tick_marks, names)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n# Plot an ROC. pred - the predictions, y - the expected output.\ndef plot_roc(pred,y):\n fpr, tpr, _ = roc_curve(y_test, pred)\n roc_auc = auc(fpr, tpr)\n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic (ROC)')\n plt.legend(loc=\"lower right\")\n plt.show()\n \n# Plot a lift curve. pred - the predictions, y - the expected output.\ndef chart_regression(pred,y):\n t = pd.DataFrame({'pred' : pred.flatten(), 'y' : y_test.flatten()})\n t.sort_values(by=['y'],inplace=True)\n a = plt.plot(t['y'].tolist(),label='expected')\n b = plt.plot(t['pred'].tolist(),label='prediction')\n plt.ylabel('output')\n plt.legend()\n plt.show()\nExplanation: Toolkit: Visualization Functions\nThis class will introduce 3 different visualizations that can be used with the two different classification type neural networks and regression neural networks.\nConfusion Matrix - For any type of classification neural network.\nROC Curve - For binary classification.\nLift Curve - For regression neural networks.\nThe code used to produce these visualizations is shown here:\nEnd of explanation\nimport os\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nimport tensorflow.contrib.learn as skflow\nimport numpy as np\nfrom sklearn import metrics\npath = \"./data/\"\n \nfilename = os.path.join(path,\"wcbreast_wdbc.csv\") \ndf = pd.read_csv(filename,na_values=['NA','?'])\n# Encode feature vector\ndf.drop('id',axis=1,inplace=True)\nencode_numeric_zscore(df,'mean_radius')\nencode_text_index(df,'mean_texture') \nencode_text_index(df,'mean_perimeter')\nencode_text_index(df,'mean_area')\nencode_text_index(df,'mean_smoothness')\nencode_text_index(df,'mean_compactness')\nencode_text_index(df,'mean_concavity')\nencode_text_index(df,'mean_concave_points')\nencode_text_index(df,'mean_symmetry')\nencode_text_index(df,'mean_fractal_dimension')\nencode_text_index(df,'se_radius')\nencode_text_index(df,'se_texture')\nencode_text_index(df,'se_perimeter')\nencode_text_index(df,'se_area')\nencode_text_index(df,'se_smoothness')\nencode_text_index(df,'se_compactness')\nencode_text_index(df,'se_concavity')\nencode_text_index(df,'se_concave_points')\nencode_text_index(df,'se_symmetry')\nencode_text_index(df,'se_fractal_dimension')\nencode_text_index(df,'worst_radius')\nencode_text_index(df,'worst_texture')\nencode_text_index(df,'worst_perimeter')\nencode_text_index(df,'worst_area')\nencode_text_index(df,'worst_smoothness')\nencode_text_index(df,'worst_compactness')\nencode_text_index(df,'worst_concavity')\nencode_text_index(df,'worst_concave_points')\nencode_text_index(df,'worst_symmetry')\nencode_text_index(df,'worst_fractal_dimension')\ndiagnosis = encode_text_index(df,'diagnosis')\nnum_classes = len(diagnosis)\n# Create x & y for training\n# Create the x-side (feature vectors) of the training\nx, y = to_xy(df,'diagnosis')\n \n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split( \n x, y, test_size=0.25, random_state=42) \n \n# Create a deep neural network with 3 hidden layers of 10, 20, 10\nclassifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,\n steps=10000)\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50, n_classes=num_classes)\n \n# Fit/train neural network\nclassifier.fit(x_train, y_train, early_stop)\n# Measure accuracy\nscore = metrics.accuracy_score(y, classifier.predict(x))\nprint(\"Final accuracy: {}\".format(score))\nExplanation: Binary Classification\nBinary classification is used to create a model that classifies between only two classes. These two classes are often called \"positive\" and \"negative\". Consider the following program that uses the wcbreast_wdbc dataset to classify if a breast tumor is cancerous (malignant) or not (benign). The iris dataset is not binary, because there are three classes (3 types of iris).\nEnd of explanation\nimport numpy as np\nfrom sklearn import svm, datasets\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\npred = classifier.predict(x_test)\n \n# Compute confusion matrix\ncm = confusion_matrix(y_test, pred)\nnp.set_printoptions(precision=2)\nprint('Confusion matrix, without normalization')\nprint(cm)\nplt.figure()\nplot_confusion_matrix(cm, diagnosis)\n# Normalize the confusion matrix by row (i.e by the number of samples\n# in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Normalized confusion matrix')\nprint(cm_normalized)\nplt.figure()\nplot_confusion_matrix(cm_normalized, diagnosis, title='Normalized confusion matrix')\nplt.show()\nExplanation: Confusion Matrix\nThe confusion matrix is a common visualization for both binary and larger classification problems. Often a model will have difficulty differentiating between two classes. For example, a neural network might be really good at telling the difference between cats and dogs, but not so good at telling the difference between dogs and wolves. The following code generates a confusion matrix:\nEnd of explanation\npred = classifier.predict_proba(x_test)\npred = pred[:,1] # Only positive cases\n# print(pred[:,1])\nplot_roc(pred,y_test)\nExplanation: The above two confusion matrixes show the same network. The bottom (normalized) is the type you will normally see. Notice the two labels. The label \"B\" means benign (no cancer) and the label \"M\" means malignant (cancer). The left-right (x) axis are the predictions, the top-bottom) are the expected outcomes. A perfect model (that never makes an error) has a dark blue diagonal that runs from top-left to bottom-right. \nTo read, consider the top-left square. This square indicates \"true labeled\" of B and also \"predicted label\" of B. This is good! The prediction matched the truth. The blueness of this box represents how often \"B\" is classified correct. It is not darkest blue. This is because the square to the right(which is off the perfect diagonal) has some color. This square indicates truth of \"B\" but prediction of \"M\". The white square, at the bottom-left, indicates a true of \"M\" but predicted of \"B\". The whiteness indicates this rarely happens. \nYour conclusion from the above chart is that the model sometimes classifies \"B\" as \"M\" (a false negative), but never mis-classifis \"M\" as \"B\". Always look for the dark diagonal, this is good!\nROC Curves\nROC curves can be a bit confusing. However, they are very common. It is important to know how to read them. Even their name is confusing. Do not worry about their name, it comes from electrical engineering (EE).\nBinary classification is common in medical testing. Often you want to diagnose if someone has a disease. This can lead to two types of errors, know as false positives and false negatives:\nFalse Positive - Your test (neural network) indicated that the patient had the disease; however, the patient did not have the disease.\nFalse Negative - Your test (neural network) indicated that the patient did not have the disease; however, the patient did have the disease.\nTrue Positive - Your test (neural network) correctly identified that the patient had the disease.\nTrue Negative - Your test (neural network) correctly identified that the patient did not have the disease.\nTypes of errors:\nNeural networks classify in terms of probbility of it being positive. However, at what probability do you give a positive result? Is the cutoff 50%? 90%? Where you set this cutoff is called the threshold. Anything above the cutoff is positive, anything below is negative. Setting this cutoff allows the model to be more sensative or specific:\nThe following shows a more sensitive cutoff:\nAn ROC curve measures how good a model is regardless of the cutoff. The following shows how to read a ROC chart:\nThe following code shows an ROC chart for the breast cancer neural network. The area under the curve (AUC) is also an important measure. The larger the AUC, the better.\nEnd of explanation\nimport os\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nimport tensorflow.contrib.learn as skflow\nimport numpy as np\npath = \"./data/\"\n \nfilename = os.path.join(path,\"iris.csv\") \ndf = pd.read_csv(filename,na_values=['NA','?'])\n# Encode feature vector\nencode_numeric_zscore(df,'petal_w')\nencode_numeric_zscore(df,'petal_l')\nencode_numeric_zscore(df,'sepal_w')\nencode_numeric_zscore(df,'sepal_l')\nspecies = encode_text_index(df,\"species\")\nnum_classes = len(species)\n# Create x & y for training\n# Create the x-side (feature vectors) of the training\nx, y = to_xy(df,'species')\n \n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split( \n x, y, test_size=0.25, random_state=45) \n # as much as I would like to use 42, it gives a perfect result, and a boring confusion matrix!\n \n# Create a deep neural network with 3 hidden layers of 10, 20, 10\nclassifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,\n steps=10000)\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50, n_classes=num_classes)\n \n# Fit/train neural network\nclassifier.fit(x_train, y_train, early_stop)\nimport numpy as np\nfrom sklearn import svm, datasets\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\npred = classifier.predict(x_test)\n \n# Compute confusion matrix\ncm = confusion_matrix(y_test, pred)\nnp.set_printoptions(precision=2)\nprint('Confusion matrix, without normalization')\nprint(cm)\nplt.figure()\nplot_confusion_matrix(cm, species)\n# Normalize the confusion matrix by row (i.e by the number of samples\n# in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Normalized confusion matrix')\nprint(cm_normalized)\nplt.figure()\nplot_confusion_matrix(cm_normalized, species, title='Normalized confusion matrix')\nplt.show()\nExplanation: Classification\nWe've already seen multi-class classification, with the iris dataset. Confusion matrixes work just fine with 3 classes. The following code generates a confusion matrix for iris.\nEnd of explanation\nimport tensorflow.contrib.learn as skflow\nimport pandas as pd\nimport os\nimport numpy as np\nfrom sklearn import metrics\nfrom scipy.stats import zscore\npath = \"./data/\"\nfilename_read = os.path.join(path,\"auto-mpg.csv\")\ndf = pd.read_csv(filename_read,na_values=['NA','?'])\n# create feature vector\nmissing_median(df, 'horsepower')\ndf.drop('name',1,inplace=True)\nencode_numeric_zscore(df, 'horsepower')\nencode_numeric_zscore(df, 'weight')\nencode_numeric_zscore(df, 'cylinders')\nencode_numeric_zscore(df, 'displacement')\nencode_numeric_zscore(df, 'acceleration')\nencode_text_dummy(df, 'origin')\n# Encode to a 2D matrix for training\nx,y = to_xy(df,['mpg'])\n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.25, random_state=42)\n# Create a deep neural network with 3 hidden layers of 50, 25, 10\nregressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000)\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50)\n# Fit/train neural network\nregressor.fit(x_train, y_train, early_stop)\npred = regressor.predict(x_test)\nchart_regression(pred,y_test)\nExplanation: See the strong diagonal? Iris is easy. See the light blue near the bottom? Sometimes virginica is confused for versicolor.\nRegression\nWe've already seen regression with the MPG dataset. Regression uses its own set of visualizations, one of the most common is the lift chart. The following code generates a lift chart.\nEnd of explanation"}}},{"rowIdx":2175,"cells":{"Unnamed: 0":{"kind":"number","value":2175,"string":"2,175"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Computing covariance matrix\nStep1: Source estimation method such as MNE require a noise estimations from the\nrecordings. In this tutorial we cover the basics of noise covariance and\nconstruct a noise covariance matrix that can be used when computing the\ninverse solution. For more information, see BABDEEEB.\nStep2: The definition of noise depends on the paradigm. In MEG it is quite common\nto use empty room measurements for the estimation of sensor noise. However if\nyou are dealing with evoked responses, you might want to also consider\nresting state brain activity as noise.\nFirst we compute the noise using empty room recording. Note that you can also\nuse only a part of the recording with tmin and tmax arguments. That can be\nuseful if you use resting state as a noise baseline. Here we use the whole\nempty room recording to compute the noise covariance (tmax=None is the same\nas the end of the recording, see \nStep3: Now that you the covariance matrix in a python object you can save it to a\nfile with \nStep4: Note that this method also attenuates the resting state activity in your\nsource estimates.\nStep5: Plot the covariance matrices\nTry setting proj to False to see the effect. Notice that the projectors in\nepochs are already applied, so proj parameter has no effect.\nStep6: How should I regularize the covariance matrix?\nThe estimated covariance can be numerically\nunstable and tends to induce correlations between estimated source amplitudes\nand the number of samples available. The MNE manual therefore suggests to\nregularize the noise covariance matrix (see\ncov_regularization), especially if only few samples are available.\nUnfortunately it is not easy to tell the effective number of samples, hence,\nto choose the appropriate regularization.\nIn MNE-Python, regularization is done using advanced regularization methods\ndescribed in [1]_. For this the 'auto' option can be used. With this\noption cross-validation will be used to learn the optimal regularization\nStep7: This procedure evaluates the noise covariance quantitatively by how well it\nwhitens the data using the\nnegative log-likelihood of unseen data. The final result can also be visually\ninspected.\nUnder the assumption that the baseline does not contain a systematic signal\n(time-locked to the event of interest), the whitened baseline signal should\nbe follow a multivariate Gaussian distribution, i.e.,\nwhitened baseline signals should be between -1.96 and 1.96 at a given time\nsample.\nBased on the same reasoning, the expected value for the global field power\n(GFP) is 1 (calculation of the GFP should take into account the true degrees\nof freedom, e.g. ddof=3 with 2 active SSP vectors)\nStep8: This plot displays both, the whitened evoked signals for each channels and\nthe whitened GFP. The numbers in the GFP panel represent the estimated rank\nof the data, which amounts to the effective degrees of freedom by which the\nsquared sum across sensors is divided when computing the whitened GFP.\nThe whitened GFP also helps detecting spurious late evoked components which\ncan be the consequence of over- or under-regularization.\nNote that if data have been processed using signal space separation\n(SSS) [2],\ngradiometers and magnetometers will be displayed jointly because both are\nreconstructed from the same SSS basis vectors with the same numerical rank.\nThis also implies that both sensor types are not any longer statistically\nindependent.\nThese methods for evaluation can be used to assess model violations.\nAdditional\nintroductory materials can be found here &lt;https"},"code_prompt":{"kind":"string","value":"Python Code:\nimport os.path as op\nimport mne\nfrom mne.datasets import sample\nExplanation: Computing covariance matrix\nEnd of explanation\ndata_path = sample.data_path()\nraw_empty_room_fname = op.join(\n data_path, 'MEG', 'sample', 'ernoise_raw.fif')\nraw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname, add_eeg_ref=False)\nraw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False)\nraw.set_eeg_reference()\nraw.info['bads'] += ['EEG 053'] # bads + 1 more\nExplanation: Source estimation method such as MNE require a noise estimations from the\nrecordings. In this tutorial we cover the basics of noise covariance and\nconstruct a noise covariance matrix that can be used when computing the\ninverse solution. For more information, see BABDEEEB.\nEnd of explanation\nnoise_cov = mne.compute_raw_covariance(raw_empty_room, tmin=0, tmax=None)\nExplanation: The definition of noise depends on the paradigm. In MEG it is quite common\nto use empty room measurements for the estimation of sensor noise. However if\nyou are dealing with evoked responses, you might want to also consider\nresting state brain activity as noise.\nFirst we compute the noise using empty room recording. Note that you can also\nuse only a part of the recording with tmin and tmax arguments. That can be\nuseful if you use resting state as a noise baseline. Here we use the whole\nempty room recording to compute the noise covariance (tmax=None is the same\nas the end of the recording, see :func:mne.compute_raw_covariance).\nEnd of explanation\nevents = mne.find_events(raw)\nepochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.0,\n baseline=(-0.2, 0.0))\nExplanation: Now that you the covariance matrix in a python object you can save it to a\nfile with :func:mne.write_cov. Later you can read it back to a python\nobject using :func:mne.read_cov.\nYou can also use the pre-stimulus baseline to estimate the noise covariance.\nFirst we have to construct the epochs. When computing the covariance, you\nshould use baseline correction when constructing the epochs. Otherwise the\ncovariance matrix will be inaccurate. In MNE this is done by default, but\njust to be sure, we define it here manually.\nEnd of explanation\nnoise_cov_baseline = mne.compute_covariance(epochs)\nExplanation: Note that this method also attenuates the resting state activity in your\nsource estimates.\nEnd of explanation\nnoise_cov.plot(raw_empty_room.info, proj=True)\nnoise_cov_baseline.plot(epochs.info)\nExplanation: Plot the covariance matrices\nTry setting proj to False to see the effect. Notice that the projectors in\nepochs are already applied, so proj parameter has no effect.\nEnd of explanation\ncov = mne.compute_covariance(epochs, tmax=0., method='auto')\nExplanation: How should I regularize the covariance matrix?\nThe estimated covariance can be numerically\nunstable and tends to induce correlations between estimated source amplitudes\nand the number of samples available. The MNE manual therefore suggests to\nregularize the noise covariance matrix (see\ncov_regularization), especially if only few samples are available.\nUnfortunately it is not easy to tell the effective number of samples, hence,\nto choose the appropriate regularization.\nIn MNE-Python, regularization is done using advanced regularization methods\ndescribed in [1]_. For this the 'auto' option can be used. With this\noption cross-validation will be used to learn the optimal regularization:\nEnd of explanation\nevoked = epochs.average()\nevoked.plot_white(cov)\nExplanation: This procedure evaluates the noise covariance quantitatively by how well it\nwhitens the data using the\nnegative log-likelihood of unseen data. The final result can also be visually\ninspected.\nUnder the assumption that the baseline does not contain a systematic signal\n(time-locked to the event of interest), the whitened baseline signal should\nbe follow a multivariate Gaussian distribution, i.e.,\nwhitened baseline signals should be between -1.96 and 1.96 at a given time\nsample.\nBased on the same reasoning, the expected value for the global field power\n(GFP) is 1 (calculation of the GFP should take into account the true degrees\nof freedom, e.g. ddof=3 with 2 active SSP vectors):\nEnd of explanation\ncovs = mne.compute_covariance(epochs, tmax=0., method=('empirical', 'shrunk'),\n return_estimators=True)\nevoked = epochs.average()\nevoked.plot_white(covs)\nExplanation: This plot displays both, the whitened evoked signals for each channels and\nthe whitened GFP. The numbers in the GFP panel represent the estimated rank\nof the data, which amounts to the effective degrees of freedom by which the\nsquared sum across sensors is divided when computing the whitened GFP.\nThe whitened GFP also helps detecting spurious late evoked components which\ncan be the consequence of over- or under-regularization.\nNote that if data have been processed using signal space separation\n(SSS) [2],\ngradiometers and magnetometers will be displayed jointly because both are\nreconstructed from the same SSS basis vectors with the same numerical rank.\nThis also implies that both sensor types are not any longer statistically\nindependent.\nThese methods for evaluation can be used to assess model violations.\nAdditional\nintroductory materials can be found here &lt;https://goo.gl/ElWrxe&gt;.\nFor expert use cases or debugging the alternative estimators can also be\ncompared:\nEnd of explanation"}}},{"rowIdx":2176,"cells":{"Unnamed: 0":{"kind":"number","value":2176,"string":"2,176"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n A Simple Autoencoder\nWe'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.\nIn this notebook, we'll be build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.\nStep1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.\nStep2: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.\nExercise\nStep3: Training\nStep4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss. \nCalling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightfoward training with TensorFlow. We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed).\nStep5: Checking out the results\nBelow I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts."},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0)\nExplanation: A Simple Autoencoder\nWe'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.\nIn this notebook, we'll be build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.\nEnd of explanation\nimg = mnist.train.images[2]\nplt.imshow(img.reshape((28, 28)), cmap='Greys_r')\nExplanation: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.\nEnd of explanation\n# Size of the encoding layer (the hidden layer)\nencoding_dim = 32 # feel free to change this value\nimage_size = mnist.train.images.shape[1]\ninputs_ = tf.placeholder(tf.float32, shape=(None,image_size), name=\"inputs\")\ntargets_= tf.placeholder(tf.float32, shape=(None,image_size), name=\"targets\")\n# Output of hidden layer\nencoded = tf.layers.dense(inputs=inputs_, units=encoding_dim, activation=tf.nn.relu)\n# Output layer logits\nlogits = tf.layers.dense(inputs=encoded, units=image_size, activation=None)\n# Sigmoid output from logits\ndecoded = tf.nn.sigmoid(logits,name=\"output\")\n# Sigmoid cross-entropy loss\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_,logits=logits)\n# Mean of the loss\ncost = tf.reduce_mean(loss)\n# Adam optimizer\nopt = tf.train.AdamOptimizer(0.001).minimize(cost)\nExplanation: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.\nExercise: Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this tf.nn.sigmoid_cross_entropy_with_logits (documentation). You should note that tf.nn.sigmoid_cross_entropy_with_logits takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.\nEnd of explanation\n# Create the session\nsess = tf.Session()\nExplanation: Training\nEnd of explanation\nepochs = 20\nbatch_size = 200\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n feed = {inputs_: batch[0], targets_: batch[0]}\n batch_cost, _ = sess.run([cost, opt], feed_dict=feed)\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))\nExplanation: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss. \nCalling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightfoward training with TensorFlow. We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed).\nEnd of explanation\nfig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nreconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})\nfor images, row in zip([in_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nfig.tight_layout(pad=0.1)\nsess.close()\nExplanation: Checking out the results\nBelow I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.\nEnd of explanation"}}},{"rowIdx":2177,"cells":{"Unnamed: 0":{"kind":"number","value":2177,"string":"2,177"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Histogram of one column by binning on another continuous\nStep1: Lets create a data frame of a column made up of 1's and 0's and another categorical column.\nStep2: Now, lets create histograms of the N column but using the class column as a grouping, using the 'by' param in hist()\nStep3: OK, lets weigh the creation of the binary column, using p in random.choice()\nStep4: OK, but what about using a continuous variable? We can use pandas.cut() to bin the continuous variable\nStep5: Lets create another dataframe using a binary column and the binning from above"},"code_prompt":{"kind":"string","value":"Python Code:\n%pylab inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nExplanation: Histogram of one column by binning on another continuous\nEnd of explanation\n# Class label would be categorical variable derived from binning the continuous column\nx = ['Class1']*300 + ['Class2']*400 + ['Class3']*300 \n# Column of random 0s and 1s\ny = np.random.choice([0,1], 1000)\n# Dataframe from the above variables\ndf = pd.DataFrame({'Class':x, 'N':y})\nExplanation: Lets create a data frame of a column made up of 1's and 0's and another categorical column.\nEnd of explanation\n# From this grouping, plot histograms\nplts = df['N'].hist(by=df['Class'])\nExplanation: Now, lets create histograms of the N column but using the class column as a grouping, using the 'by' param in hist():\nEnd of explanation\nx = ['Class1']*300 + ['Class2']*400 + ['Class3']*300\ny = np.random.choice([0,1], 1000, p=[0.25, 0.75])\ndf = pd.DataFrame({'Class':x, 'N':y})\n# grouped = df.groupby('Class')\nplts = df['N'].hist(by=df['Class'])\nExplanation: OK, lets weigh the creation of the binary column, using p in random.choice():\nEnd of explanation\n# Random x data: values from 0 - 9\nx = np.random.rand(1000) * 9\n# Here we bin the continuous x variable into bins (I set the end points to be from)\n# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\nbins = pd.cut(x, [0, 3, 6, 9])\nbins\nExplanation: OK, but what about using a continuous variable? We can use pandas.cut() to bin the continuous variable:\nEnd of explanation\n# Column of random 0s and 1s\ny = np.random.choice([0,1], 1000)\n# Data frame made from column of 0s and 1s and the other column the categorical binning of the continuous x data\ndf = pd.DataFrame({'y':y, 'Class': bins})\nplts = df['y'].hist(by=df['Class'])\n# Column of random 0s and 1s, weighed\ny = np.random.choice([0,1], 1000, p = [0.25, 0.75])\n# Data frame made from column of 0s and 1s and the other column the categorical binning of the continuous x data\ndf = pd.DataFrame({'y':y, 'Class': bins})\nplts = df['y'].hist(by=df['Class'])\nExplanation: Lets create another dataframe using a binary column and the binning from above:\nEnd of explanation"}}},{"rowIdx":2178,"cells":{"Unnamed: 0":{"kind":"number","value":2178,"string":"2,178"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n TensorFlow Data Validation (Advanced)\nLearning Objectives\nInstall TFDV\nCompute and visualize statistics\nInfer a schema\nCheck evaluation data for errors\nCheck for evaluation anomalies and fix it\nCheck for drift and skew\nFreeze the schema\nIntroduction\nThis notebook illustrates how TensorFlow Data Validation (TFDV) can be used to investigate and visualize your dataset. That includes looking at descriptive statistics, inferring a schema, checking for and fixing anomalies, and checking for drift and skew in our dataset. It's important to understand your dataset's characteristics, including how it might change over time in your production pipeline. It's also important to look for anomalies in your data, and to compare your training, evaluation, and serving datasets to make sure that they're consistent.\nWe'll use data from the Taxi Trips dataset released by the City of Chicago.\nNote\nStep1: Restart the kernel (Kernel > Restart kernel > Restart).\nRe-run the above cell and proceed further.\nNote\nStep2: Load the Files\nWe will download our dataset from Google Cloud Storage.\nStep3: Check the version\nStep4: Compute and visualize statistics\nFirst we'll use tfdv.generate_statistics_from_csv to compute statistics for our training data. (ignore the snappy warnings)\nTFDV can compute descriptive statistics that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.\nInternally, TFDV uses Apache Beam's data-parallel processing framework to scale the computation of statistics over large datasets. For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.\nStep5: Now let's use tfdv.visualize_statistics, which uses Facets to create a succinct visualization of our training data\nStep6: Infer a schema\nNow let's use tfdv.infer_schema to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics.\nGetting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct. The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use tfdv.display_schema to display the inferred schema so that we can review it.\nStep7: Check evaluation data for errors\nSo far we've only been looking at the training data. It's important that our evaluation data is consistent with our training data, including that it uses the same schema. It's also important that the evaluation data includes examples of roughly the same ranges of values for our numerical features as our training data, so that our coverage of the loss surface during evaluation is roughly the same as during training. The same is true for categorical features. Otherwise, we may have training issues that are not identified during evaluation, because we didn't evaluate part of our loss surface.\nNotice that each feature now includes statistics for both the training and evaluation datasets.\nNotice that the charts now have both the training and evaluation datasets overlaid, making it easy to compare them.\nNotice that the charts now include a percentages view, which can be combined with log or the default linear scales.\nNotice that the mean and median for trip_miles are different for the training versus the evaluation datasets. Will that cause problems?\nWow, the max tips is very different for the training versus the evaluation datasets. Will that cause problems?\nClick expand on the Numeric Features chart, and select the log scale. Review the trip_seconds feature, and notice the difference in the max. Will evaluation miss parts of the loss surface?\nStep8: Check for evaluation anomalies\nDoes our evaluation dataset match the schema from our training dataset? This is especially important for categorical features, where we want to identify the range of acceptable values.\nKey Point\nStep9: Fix evaluation anomalies in the schema\nOops! It looks like we have some new values for company in our evaluation data, that we didn't have in our training data. We also have a new value for payment_type. These should be considered anomalies, but what we decide to do about them depends on our domain knowledge of the data. If an anomaly truly indicates a data error, then the underlying data should be fixed. Otherwise, we can simply update the schema to include the values in the eval dataset.\nKey Point\nStep10: Hey, look at that! We verified that the training and evaluation data are now consistent! Thanks TFDV ;)\nSchema Environments\nWe also split off a 'serving' dataset for this example, so we should check that too. By default all datasets in a pipeline should use the same schema, but there are often exceptions. For example, in supervised learning we need to include labels in our dataset, but when we serve the model for inference the labels will not be included. In some cases introducing slight schema variations is necessary.\nEnvironments can be used to express such requirements. In particular, features in schema can be associated with a set of environments using default_environment, in_environment and not_in_environment.\nFor example, in this dataset the tips feature is included as the label for training, but it's missing in the serving data. Without environment specified, it will show up as an anomaly.\nStep11: We'll deal with the tips feature below. We also have an INT value in our trip seconds, where our schema expected a FLOAT. By making us aware of that difference, TFDV helps uncover inconsistencies in the way the data is generated for training and serving. It's very easy to be unaware of problems like that until model performance suffers, sometimes catastrophically. It may or may not be a significant issue, but in any case this should be cause for further investigation.\nIn this case, we can safely convert INT values to FLOATs, so we want to tell TFDV to use our schema to infer the type. Let's do that now.\nStep12: Now we just have the tips feature (which is our label) showing up as an anomaly ('Column dropped'). Of course we don't expect to have labels in our serving data, so let's tell TFDV to ignore that.\nStep13: Check for drift and skew\nIn addition to checking whether a dataset conforms to the expectations set in the schema, TFDV also provides functionalities to detect drift and skew. TFDV performs this check by comparing the statistics of the different datasets based on the drift/skew comparators specified in the schema.\nDrift\nDrift detection is supported for categorical features and between consecutive spans of data (i.e., between span N and span N+1), such as between different days of training data. We express drift in terms of L-infinity distance, and you can set the threshold distance so that you receive warnings when the drift is higher than is acceptable. Setting the correct distance is typically an iterative process requiring domain knowledge and experimentation.\nSkew\nTFDV can detect three different kinds of skew in your data - schema skew, feature skew, and distribution skew.\nSchema Skew\nSchema skew occurs when the training and serving data do not conform to the same schema. Both training and serving data are expected to adhere to the same schema. Any expected deviations between the two (such as the label feature being only present in the training data but not in serving) should be specified through environments field in the schema.\nFeature Skew\nFeature skew occurs when the feature values that a model trains on are different from the feature values that it sees at serving time. For example, this can happen when\nStep14: In this example we do see some drift, but it is well below the threshold that we've set.\nFreeze the schema\nNow that the schema has been reviewed and curated, we will store it in a file to reflect its \"frozen\" state."},"code_prompt":{"kind":"string","value":"Python Code:\n!pip install pyarrow==5.0.0\n!pip install numpy==1.19.2\n!pip install tensorflow-data-validation\nExplanation: TensorFlow Data Validation (Advanced)\nLearning Objectives\nInstall TFDV\nCompute and visualize statistics\nInfer a schema\nCheck evaluation data for errors\nCheck for evaluation anomalies and fix it\nCheck for drift and skew\nFreeze the schema\nIntroduction\nThis notebook illustrates how TensorFlow Data Validation (TFDV) can be used to investigate and visualize your dataset. That includes looking at descriptive statistics, inferring a schema, checking for and fixing anomalies, and checking for drift and skew in our dataset. It's important to understand your dataset's characteristics, including how it might change over time in your production pipeline. It's also important to look for anomalies in your data, and to compare your training, evaluation, and serving datasets to make sure that they're consistent.\nWe'll use data from the Taxi Trips dataset released by the City of Chicago.\nNote: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.\nRead more about the dataset in Google BigQuery. Explore the full dataset in the BigQuery UI.\nKey Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about ML fairness.\nEach learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the Solution Notebook for reference. \nThe columns in the dataset are:\n\n\n\n\n\n\n\n
pickup_community_areafaretrip_start_month
trip_start_hourtrip_start_daytrip_start_timestamp
pickup_latitudepickup_longitudedropoff_latitude
dropoff_longitudetrip_milespickup_census_tract
dropoff_census_tractpayment_typecompany
trip_secondsdropoff_community_areatips
\nInstall Libraries\nEnd of explanation\nimport pandas as pd\nimport tensorflow_data_validation as tfdv\nimport sys\nimport warnings\nwarnings.filterwarnings('ignore')\nprint('Installing TensorFlow Data Validation')\n!pip install -q tensorflow_data_validation[visualization]\nExplanation: Restart the kernel (Kernel > Restart kernel > Restart).\nRe-run the above cell and proceed further.\nNote: Please ignore any incompatibility warnings and errors.\nInstall TFDV\nThis will pull in all the dependencies, which will take a minute. Please ignore the warnings or errors regarding incompatible dependency versions.\nEnd of explanation\nimport os\nimport tempfile, urllib, zipfile\n# Set up some globals for our file paths\nBASE_DIR = tempfile.mkdtemp()\nDATA_DIR = os.path.join(BASE_DIR, 'data')\nOUTPUT_DIR = os.path.join(BASE_DIR, 'chicago_taxi_output')\nTRAIN_DATA = os.path.join(DATA_DIR, 'train', 'data.csv')\nEVAL_DATA = os.path.join(DATA_DIR, 'eval', 'data.csv')\nSERVING_DATA = os.path.join(DATA_DIR, 'serving', 'data.csv')\n# Download the zip file from GCP and unzip it\nzip, headers = urllib.request.urlretrieve('https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/chicago_data.zip')\nzipfile.ZipFile(zip).extractall(BASE_DIR)\nzipfile.ZipFile(zip).close()\nprint(\"Here's what we downloaded:\")\n!ls -R {os.path.join(BASE_DIR, 'data')}\nExplanation: Load the Files\nWe will download our dataset from Google Cloud Storage.\nEnd of explanation\nimport tensorflow_data_validation as tfdv\nprint('TFDV version: {}'.format(tfdv.version.__version__))\nExplanation: Check the version\nEnd of explanation\n# Compute data statistics from CSV files.\n# TODO: Your code goes here\nExplanation: Compute and visualize statistics\nFirst we'll use tfdv.generate_statistics_from_csv to compute statistics for our training data. (ignore the snappy warnings)\nTFDV can compute descriptive statistics that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.\nInternally, TFDV uses Apache Beam's data-parallel processing framework to scale the computation of statistics over large datasets. For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.\nEnd of explanation\n# Visualize the input statistics using Facets.\n# TODO: Your code goes here\nExplanation: Now let's use tfdv.visualize_statistics, which uses Facets to create a succinct visualization of our training data:\nNotice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.\nNotice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.\nNotice that there are no examples with values for pickup_census_tract. This is an opportunity for dimensionality reduction!\nTry clicking \"expand\" above the charts to change the display\nTry hovering over bars in the charts to display bucket ranges and counts\nTry switching between the log and linear scales, and notice how the log scale reveals much more detail about the payment_type categorical feature\nTry selecting \"quantiles\" from the \"Chart to show\" menu, and hover over the markers to show the quantile percentages\nEnd of explanation\n# Infers schema from the input statistics.\n# TODO: Your code goes here\ntfdv.display_schema(schema=schema)\nExplanation: Infer a schema\nNow let's use tfdv.infer_schema to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics.\nGetting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct. The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use tfdv.display_schema to display the inferred schema so that we can review it.\nEnd of explanation\n# Compute stats for evaluation data\neval_stats = tfdv.generate_statistics_from_csv(data_location=EVAL_DATA)\n# Compare evaluation data with training data\ntfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,\n lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')\nExplanation: Check evaluation data for errors\nSo far we've only been looking at the training data. It's important that our evaluation data is consistent with our training data, including that it uses the same schema. It's also important that the evaluation data includes examples of roughly the same ranges of values for our numerical features as our training data, so that our coverage of the loss surface during evaluation is roughly the same as during training. The same is true for categorical features. Otherwise, we may have training issues that are not identified during evaluation, because we didn't evaluate part of our loss surface.\nNotice that each feature now includes statistics for both the training and evaluation datasets.\nNotice that the charts now have both the training and evaluation datasets overlaid, making it easy to compare them.\nNotice that the charts now include a percentages view, which can be combined with log or the default linear scales.\nNotice that the mean and median for trip_miles are different for the training versus the evaluation datasets. Will that cause problems?\nWow, the max tips is very different for the training versus the evaluation datasets. Will that cause problems?\nClick expand on the Numeric Features chart, and select the log scale. Review the trip_seconds feature, and notice the difference in the max. Will evaluation miss parts of the loss surface?\nEnd of explanation\n# Check eval data for errors by validating the eval data stats using the previously inferred schema.\n# TODO: Your code goes here\ntfdv.display_anomalies(anomalies)\nExplanation: Check for evaluation anomalies\nDoes our evaluation dataset match the schema from our training dataset? This is especially important for categorical features, where we want to identify the range of acceptable values.\nKey Point: What would happen if we tried to evaluate using data with categorical feature values that were not in our training dataset? What about numeric features that are outside the ranges in our training dataset?\nEnd of explanation\n# Relax the minimum fraction of values that must come from the domain for feature company.\ncompany = tfdv.get_feature(schema, 'company')\ncompany.distribution_constraints.min_domain_mass = 0.9\n# Add new value to the domain of feature payment_type.\npayment_type_domain = tfdv.get_domain(schema, 'payment_type')\npayment_type_domain.value.append('Prcard')\n# Validate eval stats after updating the schema \n# TODO: Your code goes here\ntfdv.display_anomalies(updated_anomalies)\nExplanation: Fix evaluation anomalies in the schema\nOops! It looks like we have some new values for company in our evaluation data, that we didn't have in our training data. We also have a new value for payment_type. These should be considered anomalies, but what we decide to do about them depends on our domain knowledge of the data. If an anomaly truly indicates a data error, then the underlying data should be fixed. Otherwise, we can simply update the schema to include the values in the eval dataset.\nKey Point: How would our evaluation results be affected if we did not fix these problems?\nUnless we change our evaluation dataset we can't fix everything, but we can fix things in the schema that we're comfortable accepting. That includes relaxing our view of what is and what is not an anomaly for particular features, as well as updating our schema to include missing values for categorical features. TFDV has enabled us to discover what we need to fix.\nLet's make those fixes now, and then review one more time.\nEnd of explanation\nserving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA)\nserving_anomalies = tfdv.validate_statistics(serving_stats, schema)\ntfdv.display_anomalies(serving_anomalies)\nExplanation: Hey, look at that! We verified that the training and evaluation data are now consistent! Thanks TFDV ;)\nSchema Environments\nWe also split off a 'serving' dataset for this example, so we should check that too. By default all datasets in a pipeline should use the same schema, but there are often exceptions. For example, in supervised learning we need to include labels in our dataset, but when we serve the model for inference the labels will not be included. In some cases introducing slight schema variations is necessary.\nEnvironments can be used to express such requirements. In particular, features in schema can be associated with a set of environments using default_environment, in_environment and not_in_environment.\nFor example, in this dataset the tips feature is included as the label for training, but it's missing in the serving data. Without environment specified, it will show up as an anomaly.\nEnd of explanation\noptions = tfdv.StatsOptions(schema=schema, infer_type_from_schema=True)\nserving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA, stats_options=options)\nserving_anomalies = tfdv.validate_statistics(serving_stats, schema)\ntfdv.display_anomalies(serving_anomalies)\nExplanation: We'll deal with the tips feature below. We also have an INT value in our trip seconds, where our schema expected a FLOAT. By making us aware of that difference, TFDV helps uncover inconsistencies in the way the data is generated for training and serving. It's very easy to be unaware of problems like that until model performance suffers, sometimes catastrophically. It may or may not be a significant issue, but in any case this should be cause for further investigation.\nIn this case, we can safely convert INT values to FLOATs, so we want to tell TFDV to use our schema to infer the type. Let's do that now.\nEnd of explanation\n# All features are by default in both TRAINING and SERVING environments.\nschema.default_environment.append('TRAINING')\nschema.default_environment.append('SERVING')\n# Specify that 'tips' feature is not in SERVING environment.\ntfdv.get_feature(schema, 'tips').not_in_environment.append('SERVING')\nserving_anomalies_with_env = tfdv.validate_statistics(\n serving_stats, schema, environment='SERVING')\ntfdv.display_anomalies(serving_anomalies_with_env)\nExplanation: Now we just have the tips feature (which is our label) showing up as an anomaly ('Column dropped'). Of course we don't expect to have labels in our serving data, so let's tell TFDV to ignore that.\nEnd of explanation\n# Add skew comparator for 'payment_type' feature.\npayment_type = tfdv.get_feature(schema, 'payment_type')\npayment_type.skew_comparator.infinity_norm.threshold = 0.01\n# Add drift comparator for 'company' feature.\ncompany=tfdv.get_feature(schema, 'company')\ncompany.drift_comparator.infinity_norm.threshold = 0.001\n# TODO: Your code goes here\ntfdv.display_anomalies(skew_anomalies)\nExplanation: Check for drift and skew\nIn addition to checking whether a dataset conforms to the expectations set in the schema, TFDV also provides functionalities to detect drift and skew. TFDV performs this check by comparing the statistics of the different datasets based on the drift/skew comparators specified in the schema.\nDrift\nDrift detection is supported for categorical features and between consecutive spans of data (i.e., between span N and span N+1), such as between different days of training data. We express drift in terms of L-infinity distance, and you can set the threshold distance so that you receive warnings when the drift is higher than is acceptable. Setting the correct distance is typically an iterative process requiring domain knowledge and experimentation.\nSkew\nTFDV can detect three different kinds of skew in your data - schema skew, feature skew, and distribution skew.\nSchema Skew\nSchema skew occurs when the training and serving data do not conform to the same schema. Both training and serving data are expected to adhere to the same schema. Any expected deviations between the two (such as the label feature being only present in the training data but not in serving) should be specified through environments field in the schema.\nFeature Skew\nFeature skew occurs when the feature values that a model trains on are different from the feature values that it sees at serving time. For example, this can happen when:\nA data source that provides some feature values is modified between training and serving time\nThere is different logic for generating features between training and serving. For example, if you apply some transformation only in one of the two code paths.\nDistribution Skew\nDistribution skew occurs when the distribution of the training dataset is significantly different from the distribution of the serving dataset. One of the key causes for distribution skew is using different code or different data sources to generate the training dataset. Another reason is a faulty sampling mechanism that chooses a non-representative subsample of the serving data to train on.\nEnd of explanation\nfrom tensorflow.python.lib.io import file_io\nfrom google.protobuf import text_format\nfile_io.recursive_create_dir(OUTPUT_DIR)\nschema_file = os.path.join(OUTPUT_DIR, 'schema.pbtxt')\ntfdv.write_schema_text(schema, schema_file)\n!cat {schema_file}\nExplanation: In this example we do see some drift, but it is well below the threshold that we've set.\nFreeze the schema\nNow that the schema has been reviewed and curated, we will store it in a file to reflect its \"frozen\" state.\nEnd of explanation"}}},{"rowIdx":2179,"cells":{"Unnamed: 0":{"kind":"number","value":2179,"string":"2,179"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Departamento de Física - Faculdade de Ciências e Tecnologia da Universidade de Coimbra\nFísica Computacional - Ficha 3 - Integração e Diferenciação Numérica\nRafael Isaque Santos - 2012144694 - Licenciatura em Física\nStep1: 1 - Cálculo do integral $\\int _{0}^{\\pi} e^{x} \\cos(x) \\; dx$\nStep2: Integrando $e^{x} \\cos (x)$\nStep3: Derivar $e^{x} \\sin(x) + e^{-x} \\cos(x)$ nos pontos\nStep4: Calcular o integral\nStep5: Usando as transformações\n$x = \\frac{y}{1-y}$\n$x = \\tan \\left[ \\frac{\\pi}{4} (1+y) \\right]$\nIntegral Duplo de\n$\\int {0}^{1} \\left( \\int {-\\sqrt{1-y^{2}}} ^{\\sqrt{1-y^{2}}} \\, dx \\right) dy$\nStep6: Integral Duplo de\n$\\int {0}^{1} \\left( \\int {-\\sqrt{1-y^{2}}} ^{\\sqrt{1-y^{2}}} e^{-xy} \\, dx \\right) dy$"},"code_prompt":{"kind":"string","value":"Python Code:\nfrom numpy import sin, cos, tan, pi, e, exp, log, copy, linspace\nfrom numpy.polynomial.legendre import leggauss\nn_list = [2, 4, 8, 10, 20, 30, 50, 100]\nExplanation: Departamento de Física - Faculdade de Ciências e Tecnologia da Universidade de Coimbra\nFísica Computacional - Ficha 3 - Integração e Diferenciação Numérica\nRafael Isaque Santos - 2012144694 - Licenciatura em Física\nEnd of explanation\nf1 = lambda x: exp(x) * cos(x)\nf1_sol = -(exp(pi) + 1) / 2\ntrapezios_simples = lambda f, a, b: (b-a)/2 * (f(a) + f(b))\nsimpson13_simples = lambda f, a, b: ((b-a)/3) * (f(a) + 4*f((a + b)/2) + f(b))\nsimpson38_simples = lambda f, a, b: (3/8)*(b-a) * (f(a) + 3*f((2*a + b)/3) + 3*f((a + 2*b)/3) + f(b))\ndef trapezios_composta(f, a, b, n):\n h = (b-a)/n\n xi = a\n s_int = 0\n for i in range(n):\n s_int += f(xi) + f(xi+h)\n xi += h\n s_int *= h/2\n return s_int\ndef simpson13_composta(f, a, b, n):\n h = (b-a)/n\n x = linspace(a, b, n+1)\n s_int = 0\n for i in range(0, n, 2):\n s_int += f(x[i]) + 4*f(x[i+1]) + f(x[i+2])\n s_int *= h/3\n return s_int\nfrom sympy import oo # símbolo 'infinito'\ndef gausslegendre(f, a, b, x_pts, w_pts):\n x_gl = copy(x_pts)\n w_gl = copy(w_pts)\n def gl_sum(f, x_list, w_list):\n s_int = 0\n for x, w in zip(x_list, w_list):\n s_int += w * f(x)\n return s_int\n if (a == -1 and b == 1): return gl_sum(f, x_gl, w_gl)\n elif (a == 0 and b == oo):\n x_inf = list(map(lambda x: tan( pi/4 * (1+x)), copy(x_pts)))\n w_inf = list(map(lambda w, x: pi/4 * w/(cos(pi/4 * (1+x)))**2, copy(w_pts), copy(x_pts)))\n return gl_sum(f, x_inf, w_inf)\n else:\n h = (b-a)/2\n xi = list(map(lambda x: h * (x + 1) + a, x_gl))\n return h * gl_sum(f, xi, w_gl)\ndef erro_rel(est, real):\n if real == 0: return abs((est-real)/(est+real)) * 100\n else: return abs((est-real)/real) * 100\ndef aval_simples(f, a, b, real_value):\n print('Utilizando os métodos:')\n trap_si = trapezios_simples(f, a, b)\n print('Trapézio Simples: ' + str(trap_si) + ' Erro Relativo: ' + str(erro_rel(trap_si, real_value)) + ' %')\n simps13_si = simpson13_simples(f, a, b)\n print('Simpson 1/3 Simples: ' + str(simps13_si) + ' Erro Relativo: ' + str(erro_rel(simps13_si, real_value)) + ' %')\n simps38_si = simpson38_simples(f, a, b)\n print('Simpson 3/8 Simples: ' + str(simps38_si) + ' Erro Relativo: ' + str(erro_rel(simps38_si, real_value)) + ' %')\ndef aval_composta(f, a, b, n, x_n, w_n, real_value):\n print('Utilizando os métodos: [N = ' + str(n) + '] \\n')\n trap_c = trapezios_composta(f, a, b, n)\n print('Trapézios Composta: ' + str(trap_c) + ' Erro Relativo: ' + str(erro_rel(trap_c, real_value)))\n simp_13_c = simpson13_composta(f, a, b, n)\n print('Simpson Composta: ' + str(simp_13_c) + ' Erro Relativo: ' + str(erro_rel(simp_13_c, real_value)))\n gaule_ab = gausslegendre(f, a, b, x_n, w_n)\n print('Gauss-Legendre: ' + str(gaule_ab) + ' Erro Relativo: ' + str(erro_rel(gaule_ab, real_value)))\n print('\\n')\nExplanation: 1 - Cálculo do integral $\\int _{0}^{\\pi} e^{x} \\cos(x) \\; dx$\nEnd of explanation\naval_simples(f1, 0, pi, f1_sol)\nfor n in n_list:\n x_i, w_i = leggauss(n)\n aval_composta(f1, 0, pi, n, x_i, w_i, f1_sol)\nExplanation: Integrando $e^{x} \\cos (x)$\nEnd of explanation\nf2 = lambda x: exp(x)*sin(x) + exp(-x)*cos(x)\nf2_sol = lambda x: (exp(x)-exp(-x)) * (sin(x) + cos(x))\nx_2 = [0, pi/4, pi/2, 3*pi/4, pi]\nh_2 = [0.1, 0.05, 0.01]\ndf_2pts = lambda f, x, h: (f(x+h) - f(x)) / h\ndf_3pts = lambda f, x, h: (-f(x + 2*h) + 4*f(x+h) - 3*f(x)) / (2*h)\ndf_5pts = lambda f, x, h: (-3*f(x+4*h) + 16*f(x+3*h) - 36*f(x+2*h) + 48*f(x+h) - 25*f(x)) / (12*h)\nfor x in x_2:\n print('\\nDerivada de f(' + str(x) + ') :' )\n d_sol = f2_sol(x)\n print('Valor real = ' + str(d_sol))\n for h in h_2:\n print('com passo \\'h\\' = ' + str(h) + ' :')\n d2r = df_2pts(f2, x, h)\n print('Fórmula a 2 pontos: ' + str(d2r) + ' Erro relativo: ' + str(erro_rel(d2r, d_sol)))\n d3r = df_3pts(f2, x, h)\n print('Fórmula a 3 pontos: ' + str(d3r) + ' Erro relativo: ' + str(erro_rel(d3r, d_sol)))\n d5r = df_5pts(f2, x, h)\n print('Fórmula a 5 pontos: ' + str(d5r) + ' Erro relativo: ' + str(erro_rel(d5r, d_sol)))\nExplanation: Derivar $e^{x} \\sin(x) + e^{-x} \\cos(x)$ nos pontos:\nx = 0, $\\frac{\\pi}{4}$, $\\frac{\\pi}{2}$, $\\frac{3\\pi}{4}$ e $\\pi$.\nUtilizando as fórmulas a 2, 3 e 5 pontos.\ncom passos h = 0.1, 0.05, 0.01\nEnd of explanation\nxi, wi = leggauss(100)\nf3 = lambda x: x / (1+x)**4\ngausslegendre(f3, 0, oo, xi, wi)\nExplanation: Calcular o integral:\n$\\int _{0}^{\\infty} \\frac{x dx}{(1+x)^{4}}$\nEnd of explanation\ngausslegendre((lambda y: gausslegendre((lambda x: 1), -(1-y**2)**(1/2), (1-y**2)**(1/2) , xi, wi)), 0, 1, xi, wi)\nExplanation: Usando as transformações\n$x = \\frac{y}{1-y}$\n$x = \\tan \\left[ \\frac{\\pi}{4} (1+y) \\right]$\nIntegral Duplo de\n$\\int {0}^{1} \\left( \\int {-\\sqrt{1-y^{2}}} ^{\\sqrt{1-y^{2}}} \\, dx \\right) dy$\nEnd of explanation\ngausslegendre((lambda y: gausslegendre(lambda x: e**(-x*y), -(1-y**2)**(1/2), (1-y**2)**(1/2), xi, wi)), 0, 1, xi, wi)\nExplanation: Integral Duplo de\n$\\int {0}^{1} \\left( \\int {-\\sqrt{1-y^{2}}} ^{\\sqrt{1-y^{2}}} e^{-xy} \\, dx \\right) dy$\nEnd of explanation"}}},{"rowIdx":2180,"cells":{"Unnamed: 0":{"kind":"number","value":2180,"string":"2,180"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Copyright 2020 The OpenFermion Developers\nStep1: Circuits 1\nStep2: Background\nSecond quantized fermionic operators\nIn order to represent fermionic systems on a quantum computer one must first discretize space. Usually, one expands the many-body wavefunction in a basis of spin-orbitals $\\varphi_p = \\varphi_p(r)$ which are single-particle basis functions. For reasons of spatial efficiency, all NISQ (and even most error-corrected) algorithms for simulating fermionic systems focus on representing operators in second-quantization. Second-quantized operators are expressed using the fermionic creation and annihilation operators, $a^\\dagger_p$ and $a_p$. The action of $a^\\dagger_p$ is to excite a fermion in spin-orbital $\\varphi_p$ and the action of $a_p$ is to annihilate a fermion from spin-orbital $\\varphi_p$. Specifically, if electron $i$ is represented in a space of spin-orbitals ${\\varphi_p(r_i)}$ then $a^\\dagger_p$ and $a_p$ are related to Slater determinants through the equivalence,\n$$\n\\langle r_0 \\cdots r_{\\eta-1} | a^\\dagger_{0} \\cdots a^\\dagger_{\\eta-1} | \\varnothing\\rangle \\equiv \\sqrt{\\frac{1}{\\eta!}}\n\\begin{vmatrix}\n\\varphi_{0}\\left(r_0\\right) & \\varphi_{1}\\left( r_0\\right) & \\cdots & \\varphi_{\\eta-1} \\left( r_0\\right) \\\n\\varphi_{0}\\left(r_1\\right) & \\varphi_{1}\\left( r_1\\right) & \\cdots & \\varphi_{\\eta-1} \\left( r_1\\right) \\\n\\vdots & \\vdots & \\ddots & \\vdots\\\n\\varphi_{0}\\left(r_{\\eta-1}\\right) & \\varphi_{1}\\left(r_{\\eta-1}\\right) & \\cdots & \\varphi_{\\eta-1} \\left(r_{\\eta-1}\\right) \\end{vmatrix}\n$$\nwhere $\\eta$ is the number of electrons in the system, $|\\varnothing \\rangle$ is the Fermi vacuum and $\\varphi_p(r)=\\langle r|\\varphi_p \\rangle$ are the single-particle orbitals that define the basis. By using a basis of Slater determinants, we ensure antisymmetry in the encoded state.\nRotations of the single-particle basis\nVery often in electronic structure calculations one would like to rotate the single-particle basis. That is, one would like to generate new orbitals that are formed from a linear combination of the old orbitals. Any particle-conserving rotation of the single-particle basis can be expressed as\n$$\n\\tilde{\\varphi}p = \\sum{q} \\varphi_q u_{pq}\n\\quad\n\\tilde{a}^\\dagger_p = \\sum_{q} a^\\dagger_q u_{pq}\n\\quad\n\\tilde{a}p = \\sum{q} a_q u_{pq}^*\n$$\nwhere $\\tilde{\\varphi}p$, $\\tilde{a}^\\dagger_p$, and $\\tilde{a}^\\dagger_p$ correspond to spin-orbitals and operators in the rotated basis and $u$ is an $N\\times N$ unitary matrix. From the Thouless theorem, this single-particle rotation\nis equivalent to applying the $2^N \\times 2^N$ operator\n$$\n U(u) = \\exp\\left(\\sum{pq} \\left[\\log u \\right]{pq} \\left(a^\\dagger_p a_q - a^\\dagger_q a_p\\right)\\right) \n$$\nwhere $\\left[\\log u\\right]{pq}$ is the $(p, q)$ element of the matrix $\\log u$.\nThere are many reasons that one might be interested in performing such basis rotations. For instance, one might be interested in preparing the Hartree-Fock (mean-field) state of a chemical system, by rotating from some initial orbitals (e.g. atomic orbitals or plane waves) into the molecular orbitals of the system. Alternatively, one might be interested in rotating from a basis where certain operators are diagonal (e.g. the kinetic operator is diagonal in the plane wave basis) to a basis where certain other operators are diagonal (e.g. the Coulomb operator is diagonal in the position basis). Thus, it is a very useful thing to be able to apply circuits corresponding to $U(u)$ on a quantum computer in low depth.\nCompiling linear depth circuits to rotate the orbital basis\nOpenFermion prominently features routines for implementing the linear depth / linear connectivity basis transformations described in Phys. Rev. Lett. 120, 110501. While we will not discuss this functionality here, we also support routines for compiling the more general form of these transformations which do not conserve particle-number, known as a Bogoliubov transformation, using routines described in Phys. Rev. Applied 9, 044036. We will not discuss the details of how these methods are implemented here and instead refer readers to those papers. All that one needs in order to compile the circuit $U(u)$ using OpenFermion is the $N \\times N$ matrix $u$, which we refer to in documentation as the \"basis_transformation_matrix\". Note that if one intends to apply this matrix to a computational basis state with only $\\eta$ electrons, then one can reduce the number of gates required by instead supplying the $\\eta \\times N$ rectangular matrix that characterizes the rotation of the occupied orbitals only. OpenFermion will automatically take advantage of this symmetry.\nOpenFermion example implementation\nStep3: Now we're ready to make a circuit! First we will use OpenFermion to generate the basis transform $U(u)$ from the basis transformation matrix $u$ by calling the Bogoliubov transform function (named as such because this function can also handle non-particle conserving basis transformations). Then, we'll apply local $Z$ rotations to phase by the eigenvalues, then we'll apply the inverse transformation. That will finish the circuit. We're just going to print out the first rotation to keep things easy-to-read, but feel free to play around with the notebook.\nStep4: Finally, we can check whether our circuit applied to a random initial state with the exact result. Print out the fidelity with the exact result.\nStep5: Thus, we see that the circuit correctly effects the intended evolution. We can now use Cirq's compiler to output the circuit using gates native to near-term devices, and then optimize those circuits. We'll output in QASM 2.0 just to demonstrate that functionality."},"code_prompt":{"kind":"string","value":"Python Code:\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nExplanation: Copyright 2020 The OpenFermion Developers\nEnd of explanation\ntry:\n import openfermion\nexcept ImportError:\n !pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion\nExplanation: Circuits 1: Compiling arbitrary single-particle basis rotations in linear depth\n\n \n \n \n \n
\n View on QuantumAI\n \n Run in Google Colab\n \n View source on GitHub\n \n Download notebook\n
\nThis is the first of several tutorials demonstrating the compilation of quantum circuits. These tutorials build on one another and should be studied in order. In this tutorial we will discuss the compilation of circuits for implementing arbitrary rotations of the single-particle basis of an electronic structure simulation. As an example, we show how one can use these methods to simulate the evolution of an arbitrary non-interacting fermion model.\nSetup\nInstall the OpenFermion package:\nEnd of explanation\nimport openfermion\nimport numpy\n# Set the number of qubits in our example.\nn_qubits = 3\nsimulation_time = 1.\nrandom_seed = 8317\n# Generate the random one-body operator.\nT = openfermion.random_hermitian_matrix(n_qubits, seed=random_seed)\n# Diagonalize T and obtain basis transformation matrix (aka \"u\").\neigenvalues, eigenvectors = numpy.linalg.eigh(T)\nbasis_transformation_matrix = eigenvectors.transpose()\n# Print out familiar OpenFermion \"FermionOperator\" form of H.\nH = openfermion.FermionOperator()\nfor p in range(n_qubits):\n for q in range(n_qubits):\n term = ((p, 1), (q, 0))\n H += openfermion.FermionOperator(term, T[p, q])\nprint(H)\nExplanation: Background\nSecond quantized fermionic operators\nIn order to represent fermionic systems on a quantum computer one must first discretize space. Usually, one expands the many-body wavefunction in a basis of spin-orbitals $\\varphi_p = \\varphi_p(r)$ which are single-particle basis functions. For reasons of spatial efficiency, all NISQ (and even most error-corrected) algorithms for simulating fermionic systems focus on representing operators in second-quantization. Second-quantized operators are expressed using the fermionic creation and annihilation operators, $a^\\dagger_p$ and $a_p$. The action of $a^\\dagger_p$ is to excite a fermion in spin-orbital $\\varphi_p$ and the action of $a_p$ is to annihilate a fermion from spin-orbital $\\varphi_p$. Specifically, if electron $i$ is represented in a space of spin-orbitals ${\\varphi_p(r_i)}$ then $a^\\dagger_p$ and $a_p$ are related to Slater determinants through the equivalence,\n$$\n\\langle r_0 \\cdots r_{\\eta-1} | a^\\dagger_{0} \\cdots a^\\dagger_{\\eta-1} | \\varnothing\\rangle \\equiv \\sqrt{\\frac{1}{\\eta!}}\n\\begin{vmatrix}\n\\varphi_{0}\\left(r_0\\right) & \\varphi_{1}\\left( r_0\\right) & \\cdots & \\varphi_{\\eta-1} \\left( r_0\\right) \\\n\\varphi_{0}\\left(r_1\\right) & \\varphi_{1}\\left( r_1\\right) & \\cdots & \\varphi_{\\eta-1} \\left( r_1\\right) \\\n\\vdots & \\vdots & \\ddots & \\vdots\\\n\\varphi_{0}\\left(r_{\\eta-1}\\right) & \\varphi_{1}\\left(r_{\\eta-1}\\right) & \\cdots & \\varphi_{\\eta-1} \\left(r_{\\eta-1}\\right) \\end{vmatrix}\n$$\nwhere $\\eta$ is the number of electrons in the system, $|\\varnothing \\rangle$ is the Fermi vacuum and $\\varphi_p(r)=\\langle r|\\varphi_p \\rangle$ are the single-particle orbitals that define the basis. By using a basis of Slater determinants, we ensure antisymmetry in the encoded state.\nRotations of the single-particle basis\nVery often in electronic structure calculations one would like to rotate the single-particle basis. That is, one would like to generate new orbitals that are formed from a linear combination of the old orbitals. Any particle-conserving rotation of the single-particle basis can be expressed as\n$$\n\\tilde{\\varphi}p = \\sum{q} \\varphi_q u_{pq}\n\\quad\n\\tilde{a}^\\dagger_p = \\sum_{q} a^\\dagger_q u_{pq}\n\\quad\n\\tilde{a}p = \\sum{q} a_q u_{pq}^*\n$$\nwhere $\\tilde{\\varphi}p$, $\\tilde{a}^\\dagger_p$, and $\\tilde{a}^\\dagger_p$ correspond to spin-orbitals and operators in the rotated basis and $u$ is an $N\\times N$ unitary matrix. From the Thouless theorem, this single-particle rotation\nis equivalent to applying the $2^N \\times 2^N$ operator\n$$\n U(u) = \\exp\\left(\\sum{pq} \\left[\\log u \\right]{pq} \\left(a^\\dagger_p a_q - a^\\dagger_q a_p\\right)\\right) \n$$\nwhere $\\left[\\log u\\right]{pq}$ is the $(p, q)$ element of the matrix $\\log u$.\nThere are many reasons that one might be interested in performing such basis rotations. For instance, one might be interested in preparing the Hartree-Fock (mean-field) state of a chemical system, by rotating from some initial orbitals (e.g. atomic orbitals or plane waves) into the molecular orbitals of the system. Alternatively, one might be interested in rotating from a basis where certain operators are diagonal (e.g. the kinetic operator is diagonal in the plane wave basis) to a basis where certain other operators are diagonal (e.g. the Coulomb operator is diagonal in the position basis). Thus, it is a very useful thing to be able to apply circuits corresponding to $U(u)$ on a quantum computer in low depth.\nCompiling linear depth circuits to rotate the orbital basis\nOpenFermion prominently features routines for implementing the linear depth / linear connectivity basis transformations described in Phys. Rev. Lett. 120, 110501. While we will not discuss this functionality here, we also support routines for compiling the more general form of these transformations which do not conserve particle-number, known as a Bogoliubov transformation, using routines described in Phys. Rev. Applied 9, 044036. We will not discuss the details of how these methods are implemented here and instead refer readers to those papers. All that one needs in order to compile the circuit $U(u)$ using OpenFermion is the $N \\times N$ matrix $u$, which we refer to in documentation as the \"basis_transformation_matrix\". Note that if one intends to apply this matrix to a computational basis state with only $\\eta$ electrons, then one can reduce the number of gates required by instead supplying the $\\eta \\times N$ rectangular matrix that characterizes the rotation of the occupied orbitals only. OpenFermion will automatically take advantage of this symmetry.\nOpenFermion example implementation: exact evolution under tight binding models\nIn this example will show how basis transforms can be used to implement exact evolution under a random Hermitian one-body fermionic operator\n\\begin{equation}\nH = \\sum_{pq} T_{pq} a^\\dagger_p a_q.\n\\end{equation}\nThat is, we will compile a circuit to implement $e^{-i H t}$ for some time $t$. Of course, this is a tractable problem classically but we discuss it here since it is often useful as a subroutine for more complex quantum simulations. To accomplish this evolution, we will use basis transformations. Suppose that $u$ is the basis transformation matrix that diagonalizes $T$. Then, we could implement $e^{-i H t}$ by implementing $U(u)^\\dagger (\\prod_{k} e^{-i \\lambda_k Z_k}) U(u)$ where $\\lambda_k$ are the eigenvalues of $T$. \nBelow, we initialize the T matrix characterizing $H$ and then obtain the eigenvalues $\\lambda_k$ and eigenvectors $u_k$ of $T$. We print out the OpenFermion FermionOperator representation of $T$.\nEnd of explanation\nimport openfermion\nimport cirq\nimport cirq_google\n# Initialize the qubit register.\nqubits = cirq.LineQubit.range(n_qubits)\n# Start circuit with the inverse basis rotation, print out this step.\ninverse_basis_rotation = cirq.inverse(openfermion.bogoliubov_transform(qubits, basis_transformation_matrix))\ncircuit = cirq.Circuit(inverse_basis_rotation)\nprint(circuit)\n# Add diagonal phase rotations to circuit.\nfor k, eigenvalue in enumerate(eigenvalues):\n phase = -eigenvalue * simulation_time\n circuit.append(cirq.rz(rads=phase).on(qubits[k]))\n# Finally, restore basis.\nbasis_rotation = openfermion.bogoliubov_transform(qubits, basis_transformation_matrix)\ncircuit.append(basis_rotation)\nExplanation: Now we're ready to make a circuit! First we will use OpenFermion to generate the basis transform $U(u)$ from the basis transformation matrix $u$ by calling the Bogoliubov transform function (named as such because this function can also handle non-particle conserving basis transformations). Then, we'll apply local $Z$ rotations to phase by the eigenvalues, then we'll apply the inverse transformation. That will finish the circuit. We're just going to print out the first rotation to keep things easy-to-read, but feel free to play around with the notebook.\nEnd of explanation\n# Initialize a random initial state.\ninitial_state = openfermion.haar_random_vector(\n 2 ** n_qubits, random_seed).astype(numpy.complex64)\n# Numerically compute the correct circuit output.\nimport scipy\nhamiltonian_sparse = openfermion.get_sparse_operator(H)\nexact_state = scipy.sparse.linalg.expm_multiply(\n -1j * simulation_time * hamiltonian_sparse, initial_state)\n# Use Cirq simulator to apply circuit.\nsimulator = cirq.Simulator()\nresult = simulator.simulate(circuit, qubit_order=qubits,\n initial_state=initial_state)\nsimulated_state = result.final_state_vector\n# Print final fidelity.\nfidelity = abs(numpy.dot(simulated_state, numpy.conjugate(exact_state)))**2\nprint(fidelity)\nExplanation: Finally, we can check whether our circuit applied to a random initial state with the exact result. Print out the fidelity with the exact result.\nEnd of explanation\nxmon_circuit = cirq_google.optimized_for_xmon(circuit)\nprint(xmon_circuit.to_qasm())\nExplanation: Thus, we see that the circuit correctly effects the intended evolution. We can now use Cirq's compiler to output the circuit using gates native to near-term devices, and then optimize those circuits. We'll output in QASM 2.0 just to demonstrate that functionality.\nEnd of explanation"}}},{"rowIdx":2181,"cells":{"Unnamed: 0":{"kind":"number","value":2181,"string":"2,181"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n 我们的任务\n垃圾邮件检测是机器学习在现今互联网领域的主要应用之一。几乎所有大型电子邮箱服务提供商都内置了垃圾邮件检测系统,能够自动将此类邮件分类为“垃圾邮件”。 \n在此项目中,我们将使用朴素贝叶斯算法创建一个模型,该模型会通过我们对模型的训练将信息数据集分类为垃圾信息或非垃圾信息。对垃圾文本信息进行大致了解十分重要。通常它们都包含“免费”、“赢取”、“获奖者”、“现金”、“奖品”等字眼,因为这些它们专门用来吸引你的注意力,诱惑你打开信息。此外,垃圾信息的文字一般都使用大写形式和大量感叹号。收信人能轻易辨认垃圾信息,而我们的目标是训练模型帮助我们识别垃圾信息!\n能够识别垃圾信息是一种二元分类问题,因为此处信息只有“垃圾信息”或“非垃圾信息”这两种分类。此外,这是一种监督式学习问题,因为我们会向模型中提供带标签数据集,模型能够从中学习规律并在日后做出预测。\n第 0 步:朴素贝叶斯定理简介\n贝叶斯定理是最早的概率推理算法之一,由 Reverend Bayes 提出(他用来推理上帝是否存在),该定理在某些用例中依然很有用。\n理解该定理的最佳方式是通过一个例子来讲解。假设你是一名特勤人员,你接到任务,需要在共和党总统候选人的某次竞选演说中保护他/她的安全。这场竞选演说是所有人都可以参加的公开活动,你的任务并不简单,需要时刻注意危险是否存在。一种方式是对每个人都设定一个威胁因子,根据人的特征(例如年龄、性别,是否随身带包以及紧张程度等等),你可以判断此人是否存在威胁。\n如果某人符合所有这些特征,已经超出了你内心中的疑虑阈值,你可以采取措施并将此人带离活动现场。贝叶斯定理的原理也是如此,我们将根据某些相关事件(某人的年龄、性别、是否带包了、紧张程度等)的发生概率计算某个事件(某人存在威胁)的概率。\n你还需要考虑这些特征之间的独立性。例如,如果在活动现场,有个孩子看起来很紧张,那么与紧张的成人相比,孩子存在威胁的可能性会更低。为了深入讲解这一点,看看下面两个特征:年龄和紧张程度。假设我们单独研究这些特征,我们可以设计一个将所有紧张的人视作潜在威胁人士的模型。但是,很有可能会有很多假正例,因为现场的未成年人很有可能会紧张。因此同时考虑年龄和“紧张程度”特征肯定会更准确地反映哪些人存在威胁。\n这就是该定理的“朴素”一词的含义,该定理会认为每个特征相互之间都保持独立,但实际上并非始终是这样,因此会影响到最终的结论。\n简而言之,贝叶斯定理根据某些其他事件(在此例中是信息被分类为垃圾信息)的联合概率分布计算某个事件(在此例中是信息为垃圾信息)的发生概率。稍后我们将深入了解贝叶斯定理的原理,但首先了解下我们将处理的数据。\n第 1.1 步:了解我们的数据集 ###\n我们将使用来自 UCI 机器学习资源库中的数据集,该资源库有大量供实验性研究的精彩数据集。这是直接数据链接。\n 下面是该数据的预览: \n\n数据集中的列目前没有命名,可以看出有 2 列。\n第一列有两个值:“ham”,表示信息不是垃圾信息,以及“spam”,表示信息是垃圾信息。\n第二列是被分类的信息的文本内容。\n 说明:\n* 使用 read_table 方法可以将数据集导入 pandas 数据帧。因为这是一个用制表符分隔的数据集,因此我们将使用“\\t”作为“sep”参数的值,表示这种分隔格式。\n* 此外,通过为 read_table() 的“names”参数指定列表 ['label, 'sms_message'],重命名列。\n* 用新的列名输出数据帧的前五个值。\nStep1: 第 1.2 步:数据预处理\n我们已经大概了解数据集的结构,现在将标签转换为二元变量,0 表示“ham”(即非垃圾信息),1表示“spam”,这样比较方便计算。\n你可能会疑问,为何要执行这一步?答案在于 scikit-learn 处理输入的方式。Scikit-learn 只处理数字值,因此如果标签值保留为字符串,scikit-learn 会自己进行转换(更确切地说,字符串标签将转型为未知浮点值)。 \n如果标签保留为字符串,模型依然能够做出预测,但是稍后计算效果指标(例如计算精确率和召回率分数)时可能会遇到问题。因此,为了避免稍后出现意外的陷阱,最好将分类值转换为整数,再传入模型中。 \n说明:\n* 使用映射方法将“标签”列中的值转换为数字值,如下所示:\n{'ham'\nStep2: 第 2.1 步:Bag of words\n我们的数据集中有大量文本数据(5,572 行数据)。大多数机器学习算法都要求传入的输入是数字数据,而电子邮件/信息通常都是文本。\n现在我们要介绍 Bag of Words (BoW) 这个概念,它用来表示要处理的问题具有“大量单词”或很多文本数据。BoW 的基本概念是拿出一段文本,计算该文本中单词的出现频率。注意:BoW 平等地对待每个单词,单词的出现顺序并不重要。\n利用我们将介绍的流程,我们可以将文档集合转换成矩阵,每个文档是一行,每个单词(令牌)是一列,对应的(行,列)值是每个单词或令牌在此文档中出现的频率。\n例如:\n假设有四个如下所示的文档:\n['Hello, how are you!',\n'Win money, win from home.',\n'Call me now',\n'Hello, Call you tomorrow?']\n我们的目标是将这组文本转换为频率分布矩阵,如下所示:\n\n从图中可以看出,文档在行中进行了编号,每个单词是一个列名称,相应的值是该单词在文档中出现的频率。\n我们详细讲解下,看看如何使用一小组文档进行转换。\n要处理这一步,我们将使用 sklearns \ncount vectorizer 方法,该方法的作用如下所示:\n它会令牌化字符串(将字符串划分为单个单词)并为每个令牌设定一个整型 ID。\n它会计算每个令牌的出现次数。\n 请注意: \nCountVectorizer 方法会自动将所有令牌化单词转换为小写形式,避免区分“He”和“he”等单词。为此,它会使用参数 lowercase,该参数默认设为 True。\n它还会忽略所有标点符号,避免区分后面有标点的单词(例如“hello!”)和前后没有标点的同一单词(例如“hello”)。为此,它会使用参数 token_pattern,该参数使用默认正则表达式选择具有 2 个或多个字母数字字符的令牌。\n要注意的第三个参数是 stop_words。停用词是指某个语言中最常用的字词,包括“am”、“an”、“and”、“the”等。 通过将此参数值设为 english,CountVectorizer 将自动忽略(输入文本中)出现在 scikit-learn 中的内置英语停用词列表中的所有单词。这非常有用,因为当我们尝试查找表明是垃圾内容的某些单词时,停用词会使我们的结论出现偏差。\n我们将在之后的步骤中深入讲解在模型中应用每种预处理技巧的效果,暂时先知道在处理文本数据时,有这些预处理技巧可采用。\n第 2.2 步:从头实现 Bag of Words\n在深入了解帮助我们处理繁重工作的 scikit-learn 的 Bag of Words(BoW) 库之前,首先我们自己实现该步骤,以便了解该库的背后原理。 \n 第 1 步:将所有字符串转换成小写形式。\n假设有一个文档集合:\nStep3: 说明:\n* 将文档集合中的所有字符串转换成小写形式。将它们保存到叫做“lower_case_documents”的列表中。你可以使用 lower() 方法在 python 中将字符串转换成小写形式。\nStep4: 第 2 步:删除所有标点符号 \n说明:\n删除文档集合中的字符串中的所有标点。将它们保存在叫做“sans_punctuation_documents”的列表中。\nStep5: 第 3 步:令牌化 \n令牌化文档集合中的句子是指使用分隔符将句子拆分成单个单词。分隔符指定了我们将使用哪个字符来表示单词的开始和结束位置(例如,我们可以使用一个空格作为我们的文档集合的单词分隔符。)\n说明:\n使用 split() 方法令牌化“sans_punctuation_documents”中存储的字符串,并将最终文档集合存储在叫做“preprocessed_documents”的列表中。\nStep6: 第 4 步:计算频率 \n我们已经获得所需格式的文档集合,现在可以数出每个单词在文档集合的每个文档中出现的次数了。为此,我们将使用 Python collections 库中的 Counter 方法。\nCounter 会数出列表中每项的出现次数,并返回一个字典,键是被数的项目,相应的值是该项目在列表中的计数。 \n说明:\n 使用 Counter() 方法和作为输入的 preprocessed_documents 创建一个字典,键是每个文档中的每个单词,相应的值是该单词的出现频率。将每个 Counter 字典当做项目另存到一个叫做“frequency_list”的列表中。\nStep7: 恭喜!你从头实现了 Bag of Words 流程!正如在上一个输出中看到的,我们有一个频率分布字典,清晰地显示了我们正在处理的文本。\n我们现在应该充分理解 scikit-learn 中的 sklearn.feature_extraction.text.CountVectorizer 方法的背后原理了。\n我们将在下一步实现 sklearn.feature_extraction.text.CountVectorizer 方法。\n第 2.3 步:在 scikit-learn 中实现 Bag of Words\n我们已经从头实现了 BoW 概念,并使用 scikit-learn 以简洁的方式实现这一流程。我们将使用在上一步用到的相同文档集合。\nStep8: 说明:\n 导入 sklearn.feature_extraction.text.CountVectorizer 方法并创建一个实例,命名为 'count_vector'。\nStep9: 使用 CountVectorizer() 预处理数据 \n在第 2.2 步,我们从头实现了可以首先清理数据的 CountVectorizer() 方法。清理过程包括将所有数据转换为小写形式,并删除所有标点符号。CountVectorizer() 具有某些可以帮助我们完成这些步骤的参数,这些参数包括:\nlowercase = True\nStep10: token_pattern = (?u)\\\\b\\\\w\\\\w+\\\\b\nStep11: stop_words\nStep12: 你可以通过如下所示输出 count_vector 对象,查看该对象的所有参数值:\nStep13: 说明:\n使用 fit() 将你的文档数据集与 CountVectorizer 对象进行拟合,并使用 get_feature_names() 方法获得被归类为特征的单词列表。\nStep14: get_feature_names() 方法会返回此数据集的特征名称,即组成 'documents' 词汇表的单词集合。\n说明:\n创建一个矩阵,行是 4 个文档中每个文档的行,列是每个单词。对应的值(行,列)是该单词(在列中)在特定文档(在行中)中出现的频率。为此,你可以使用 transform() 方法并传入文档数据集作为参数。transform() 方法会返回一个 numpy 整数矩阵,你可以使用 toarray() 将其转换为数组,称之为 'doc_array'\nStep15: 现在,对于单词在文档中的出现频率,我们已经获得了整洁的文档表示形式。为了方便理解,下一步我们会将此数组转换为数据帧,并相应地为列命名。\n说明:\n将我们获得并加载到 'doc_array' 中的数组转换为数据帧,并将列名设为单词名称(你之前使用 get_feature_names() 计算了名称)。将该数据帧命名为 'frequency_matrix'。\nStep16: 恭喜!你为我们创建的文档数据集成功地实现了 Bag of Words 问题。\n直接使用该方法的一个潜在问题是如果我们的文本数据集非常庞大(假设有一大批新闻文章或电子邮件数据),由于语言本身的原因,肯定有某些值比其他值更常见。例如“is”、“the”、“an”等单词、代词、语法结构等会使矩阵出现偏斜并影响到分析结果。\n有几种方式可以减轻这种情况。一种方式是使用 stop_words 参数并将其值设为 english。这样会自动忽略 scikit-learn 中的内置英语停用词列表中出现的所有单词(来自输入文本)。\n另一种方式是使用 tfidf 方法。该方法已经超出了这门课程的讲解范畴。\n第 3.1 步:训练集和测试集\n我们已经知道如何处理 Bag of Words 问题,现在回到我们的数据集并继续我们的分析工作。第一步是将数据集拆分为训练集和测试集,以便稍后测试我们的模型。\n说明:\n通过在 sklearn 中使用 train_test_split 方法,将数据集拆分为训练集和测试集。使用以下变量拆分数据:\n* X_train 是 'sms_message' 列的训练数据。\n* y_train 是 'label' 列的训练数据\n* X_test 是 'sms_message' 列的测试数据。\n* y_test 是 'label' 列的测试数据。\n输出每个训练数据和测试数据的行数。\nStep17: 第 3.2 步:对数据集应用 Bag of Words 流程。\n我们已经拆分了数据,下个目标是按照第 2 步:Bag of words 中的步骤操作,并将数据转换为期望的矩阵格式。为此,我们将像之前一样使用 CountVectorizer()。我们需要完成两步:\n首先,我们需要对 CountVectorizer()拟合训练数据 (X_train) 并返回矩阵。\n其次,我们需要转换测试数据 (X_test) 以返回矩阵。\n注意:X_train 是数据集中 'sms_message' 列的训练数据,我们将使用此数据训练模型。\nX_test 是 'sms_message' 列的测试数据,我们将使用该数据(转换为矩阵后)进行预测。然后在后面的步骤中将这些预测与 y_test 进行比较。\n我们暂时为你提供了进行矩阵转换的代码!\nStep18: 第 4.1 步:从头实现贝叶斯定理\n我们的数据集已经是我们希望的格式,现在可以进行任务的下一步了,即研究用来做出预测并将信息分类为垃圾信息或非垃圾信息的算法。记得在该项目的开头,我们简要介绍了贝叶斯定理,现在我们将深入讲解该定理。通俗地说,贝叶斯定理根据与相关事件有关的其他事件的概率计算该事件的发生概率。它由先验概率(我们知道的概率或提供给我们的概率)和后验概率(我们希望用先验部分计算的概率)组成。\n我们用一个简单的示例从头实现贝叶斯定理。假设我们要根据某人接受糖尿病检测后获得阳性结果计算此人有糖尿病的概率。\n在医学领域,此类概率非常重要,因为它们涉及的是生死情况。 \n我们假设:\nP(D) 是某人患有糖尿病的概率。值为 0.01,换句话说,普通人群中有 1% 的人患有糖尿病(免责声明:这些值只是假设,并非任何医学研究的结论)。\nP(Pos):是获得阳性测试结果的概率。\nP(Neg):是获得阴性测试结果的概率。\nP(Pos|D):是本身有糖尿病并且获得阳性测试结果的概率,值为 0.9,换句话说,该测试在 90% 的情况下是正确的。亦称为敏感性或真正例率。\nP(Neg|~D):是本身没有糖尿病并且获得阴性测试结果的概率,值也为 0.9 ,因此在 90% 的情况下是正确的。亦称为特异性或真负例率。\n贝叶斯公式如下所示:\n\nP(A):A 独立发生的先验概率。在我们的示例中为 P(D),该值已经提供给我们了 。\nP(B):B 独立发生的先验概率。在我们的示例中为 P(Pos)。\nP(A|B):在给定 B 的情况下 A 发生的后验概率,在我们的示例中为 P(D|Pos),即某人的测试结果为阳性时患有糖尿病的概率。这是我们要计算的值。\nP(B|A):在给定 A 的情况下 B 可能发生的概率。在我们的示例中为 P(Pos|D),该值已经提供给我们了 。\n将这些值代入贝叶斯定理公式中:\nP(D|Pos) = P(D) * P(Pos|D) / P(Pos)\n获得阳性测试结果 P(Pos) 的概率可以使用敏感性和特异性来计算,如下所示:\nP(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity))]\nStep19: 我们可以利用所有这些信息计算后验概率,如下所示:\n​ \n某人测试结果为阳性时患有糖尿病的概率为:\nP(D|Pos) = (P(D) * Sensitivity)) / P(Pos)\n某人测试结果为阳性时没有糖尿病的概率为:\nP(~D|Pos) = (P(~D) * (1-Specificity)) / P(Pos)\n后验概率的和将始终为 1。\nStep20: 恭喜!你从头实现了贝叶斯定理。你的分析表明即使某人的测试结果为阳性,他/她也有 8.3% 的概率实际上患有糖尿病,以及 91.67% 的概率没有糖尿病。当然前提是全球只有 1% 的人群患有糖尿病,这只是个假设。\n “朴素贝叶斯”中的“朴素”一词是什么意思? \n朴素贝叶斯中的“朴素”一词实际上是指,算法在进行预测时使用的特征相互之间是独立的,但实际上并非始终这样。在我们的糖尿病示例中,我们只考虑了一个特征,即测试结果。假设我们添加了另一个特征“锻炼”。假设此特征具有二元值 0 和 1,0 表示某人一周的锻炼时间不超过 2 天,1 表示某人一周的锻炼时间超过 2 天。如果我们要同时使用这两个特征(即测试结果和“锻炼”特征的值)计算最终概率,贝叶斯定理将不可行。朴素贝叶斯是贝叶斯定理的一种延伸,假设所有特征相互之间是独立的。\n第 4.2 步:从头实现朴素贝叶斯\n你已经知道贝叶斯定理的详细原理,现在我们将用它来考虑有多个特征的情况。\n假设有两个政党的候选人,“Jill Stein”是绿党候选人,“Gary Johnson”是自由党的候选人,两位候选人在演讲中提到“自由”、“移民”和“环境”这些字眼的概率为:\nJill Stein 提到“自由”的概率:0.1 ---------> P(F|J)\nJill Stein 提到“移民”的概率:0.1 -----> P(I|J)\nJill Stein 提到“环境”的概率:0.8 -----> P(E|J)\nGary Johnson 提到“自由”的概率:0.7 -------> P(F|G)\nGary Johnson 提到“移民”的概率:0.2 ---> P(I|G)\nGary Johnson 提到“环境”的概率:0.1 ---> P(E|G)\n假设 Jill Stein 发表演讲的概率 P(J) 是 0.5,Gary Johnson 也是 P(G) = 0.5。\n了解这些信息后,如果我们要计算 Jill Stein 提到“自由”和“移民”的概率,该怎么做呢?这时候朴素贝叶斯定理就派上用场了,我们将考虑两个特征:“自由”和“移民”。\n现在我们可以定义朴素贝叶斯定理的公式:\n\n在该公式中,y 是分类变量,即候选人的姓名,x1 到 xn 是特征向量,即单个单词。该定理假设每个特征向量或单词 (xi) 相互之间是独立的。\n为了详细讲解该公式,我们需要计算以下后验概率:\nP(J|F,I):Jill Stein 提到“自由”和“移民”的概率。\nStep21: P(G|F,I):Gary Johnson 提到“自由”和“移民”的概率。\nStep22: 现在可以计算 P(J|F,I) 的概率,即 Jill Stein 提到“自由”和“移民”的概率,以及 P(G|F,I),即 Gary Johnson 提到“自由”和“移民”的概率。\nStep23: 可以看出,和贝叶斯定理一样,后验概率之和等于 1。恭喜!你从头实现了朴素贝叶斯定理。分析表明,绿党的 Jill Stein 在演讲中提到“自由”和“移民”的概率只有 6.6%,而自由党的 Gary Johnson 有 93.3% 的可能性会提到这两个词。\n另一个比较常见的朴素贝叶斯定理应用示例是在搜索引擎中搜索“萨克拉门托国王”。为了使我们能够获得与萨克拉门托国王队 NBA 篮球队相关的结果,搜索引擎需要将这两个单词关联到一起,而不是单独处理它们,否则就会获得标有“萨克拉门托”的图片(例如风光图片)以及关于“国王”的图片(可能是历史上的国王),而实际上我们想要搜索的是关于篮球队的图片。这是一种搜索引擎将单词当做非独立个体(因此采用的是“朴素”方式)的经典示例。 \n将此方法应用到我们的垃圾信息分类问题上,朴素贝叶斯算法会查看每个单词,而不是将它们当做有任何联系的关联体。对于垃圾内容检测器来说,这么做通常都可行,因为有些禁用词几乎肯定会被分类为垃圾内容,例如包含“伟哥”的电子邮件通常都被归类为垃圾邮件。\n第 5 步:使用 scikit-learn 实现朴素贝叶斯\n幸运的是,sklearn 具有多个朴素贝叶斯实现,这样我们就不用从头进行计算。我们将使用 sklearns 的 sklearn.naive_bayes 方法对我们的数据集做出预测。\n具体而言,我们将使用多项式朴素贝叶斯实现。这个分类器适合分类离散特征(例如我们的单词计数文本分类)。它会将整数单词计数作为输入。另一方面,高斯朴素贝叶斯更适合连续数据,因为它假设输入数据是高斯(正态)分布。\nStep24: 我们已经对测试集进行预测,现在需要检查预测的准确率了。\n第 6 步:评估模型\n我们已经对测试集进行了预测,下一个目标是评估模型的效果。我们可以采用各种衡量指标,但首先快速总结下这些指标。\n 准确率 衡量的是分类器做出正确预测的概率,即正确预测的数量与预测总数(测试数据点的数量)之比。\n 精确率 指的是分类为垃圾信息的信息实际上是垃圾信息的概率,即真正例(分类为垃圾内容并且实际上是垃圾内容的单词)与所有正例(所有分类为垃圾内容的单词,无论是否分类正确)之比,换句话说,是以下公式的比值结果:\n[True Positives/(True Positives + False Positives)]\n 召回率(敏感性)表示实际上为垃圾信息并且被分类为垃圾信息的信息所占比例,即真正例(分类为垃圾内容并且实际上是垃圾内容的单词)与所有为垃圾内容的单词之比,换句话说,是以下公式的比值结果:\n[True Positives/(True Positives + False Negatives)]\n对于偏态分类分布问题(我们的数据集就属于偏态分类),例如如果有 100 条信息,只有 2 条是垃圾信息,剩下的 98 条不是,则准确率本身并不是很好的指标。我们将 90 条信息分类为垃圾信息(包括 2 条垃圾信息,但是我们将其分类为非垃圾信息,因此它们属于假负例),并将 10 条信息分类为垃圾信息(所有 10 条都是假正例),依然会获得比较高的准确率分数。对于此类情形,精确率和召回率非常实用。可以通过这两个指标获得 F1 分数,即精确率和召回率分数的加权平均值。该分数的范围是 0 到 1,1 表示最佳潜在 F1 分数。\n我们将使用所有四个指标确保我们的模型效果很好。这四个指标的值范围都在 0 到 1 之间,分数尽量接近 1 可以很好地表示模型的效果如何。"},"code_prompt":{"kind":"string","value":"Python Code:\n'''\nSolution\n'''\nimport pandas as pd\n# Dataset from - https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection\ndf = pd.read_table(\"smsspamcollection/SMSSpamCollection\", sep=\"\\t\",names = ['label', 'sms_message'] )\n# Output printing out first 5 columns\ndf.head()\nExplanation: 我们的任务\n垃圾邮件检测是机器学习在现今互联网领域的主要应用之一。几乎所有大型电子邮箱服务提供商都内置了垃圾邮件检测系统,能够自动将此类邮件分类为“垃圾邮件”。 \n在此项目中,我们将使用朴素贝叶斯算法创建一个模型,该模型会通过我们对模型的训练将信息数据集分类为垃圾信息或非垃圾信息。对垃圾文本信息进行大致了解十分重要。通常它们都包含“免费”、“赢取”、“获奖者”、“现金”、“奖品”等字眼,因为这些它们专门用来吸引你的注意力,诱惑你打开信息。此外,垃圾信息的文字一般都使用大写形式和大量感叹号。收信人能轻易辨认垃圾信息,而我们的目标是训练模型帮助我们识别垃圾信息!\n能够识别垃圾信息是一种二元分类问题,因为此处信息只有“垃圾信息”或“非垃圾信息”这两种分类。此外,这是一种监督式学习问题,因为我们会向模型中提供带标签数据集,模型能够从中学习规律并在日后做出预测。\n第 0 步:朴素贝叶斯定理简介\n贝叶斯定理是最早的概率推理算法之一,由 Reverend Bayes 提出(他用来推理上帝是否存在),该定理在某些用例中依然很有用。\n理解该定理的最佳方式是通过一个例子来讲解。假设你是一名特勤人员,你接到任务,需要在共和党总统候选人的某次竞选演说中保护他/她的安全。这场竞选演说是所有人都可以参加的公开活动,你的任务并不简单,需要时刻注意危险是否存在。一种方式是对每个人都设定一个威胁因子,根据人的特征(例如年龄、性别,是否随身带包以及紧张程度等等),你可以判断此人是否存在威胁。\n如果某人符合所有这些特征,已经超出了你内心中的疑虑阈值,你可以采取措施并将此人带离活动现场。贝叶斯定理的原理也是如此,我们将根据某些相关事件(某人的年龄、性别、是否带包了、紧张程度等)的发生概率计算某个事件(某人存在威胁)的概率。\n你还需要考虑这些特征之间的独立性。例如,如果在活动现场,有个孩子看起来很紧张,那么与紧张的成人相比,孩子存在威胁的可能性会更低。为了深入讲解这一点,看看下面两个特征:年龄和紧张程度。假设我们单独研究这些特征,我们可以设计一个将所有紧张的人视作潜在威胁人士的模型。但是,很有可能会有很多假正例,因为现场的未成年人很有可能会紧张。因此同时考虑年龄和“紧张程度”特征肯定会更准确地反映哪些人存在威胁。\n这就是该定理的“朴素”一词的含义,该定理会认为每个特征相互之间都保持独立,但实际上并非始终是这样,因此会影响到最终的结论。\n简而言之,贝叶斯定理根据某些其他事件(在此例中是信息被分类为垃圾信息)的联合概率分布计算某个事件(在此例中是信息为垃圾信息)的发生概率。稍后我们将深入了解贝叶斯定理的原理,但首先了解下我们将处理的数据。\n第 1.1 步:了解我们的数据集 ###\n我们将使用来自 UCI 机器学习资源库中的数据集,该资源库有大量供实验性研究的精彩数据集。这是直接数据链接。\n 下面是该数据的预览: \n\n数据集中的列目前没有命名,可以看出有 2 列。\n第一列有两个值:“ham”,表示信息不是垃圾信息,以及“spam”,表示信息是垃圾信息。\n第二列是被分类的信息的文本内容。\n 说明:\n* 使用 read_table 方法可以将数据集导入 pandas 数据帧。因为这是一个用制表符分隔的数据集,因此我们将使用“\\t”作为“sep”参数的值,表示这种分隔格式。\n* 此外,通过为 read_table() 的“names”参数指定列表 ['label, 'sms_message'],重命名列。\n* 用新的列名输出数据帧的前五个值。\nEnd of explanation\n'''\nSolution\n'''\ndf['label'] = df.label.map({\"ham\":0, \"spam\":1})\nExplanation: 第 1.2 步:数据预处理\n我们已经大概了解数据集的结构,现在将标签转换为二元变量,0 表示“ham”(即非垃圾信息),1表示“spam”,这样比较方便计算。\n你可能会疑问,为何要执行这一步?答案在于 scikit-learn 处理输入的方式。Scikit-learn 只处理数字值,因此如果标签值保留为字符串,scikit-learn 会自己进行转换(更确切地说,字符串标签将转型为未知浮点值)。 \n如果标签保留为字符串,模型依然能够做出预测,但是稍后计算效果指标(例如计算精确率和召回率分数)时可能会遇到问题。因此,为了避免稍后出现意外的陷阱,最好将分类值转换为整数,再传入模型中。 \n说明:\n* 使用映射方法将“标签”列中的值转换为数字值,如下所示:\n{'ham':0, 'spam':1} 这样会将“ham”值映射为 0,将“spam”值映射为 1。\n* 此外,为了知道我们正在处理的数据集有多大,使用“shape”输出行数和列数\nEnd of explanation\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\nExplanation: 第 2.1 步:Bag of words\n我们的数据集中有大量文本数据(5,572 行数据)。大多数机器学习算法都要求传入的输入是数字数据,而电子邮件/信息通常都是文本。\n现在我们要介绍 Bag of Words (BoW) 这个概念,它用来表示要处理的问题具有“大量单词”或很多文本数据。BoW 的基本概念是拿出一段文本,计算该文本中单词的出现频率。注意:BoW 平等地对待每个单词,单词的出现顺序并不重要。\n利用我们将介绍的流程,我们可以将文档集合转换成矩阵,每个文档是一行,每个单词(令牌)是一列,对应的(行,列)值是每个单词或令牌在此文档中出现的频率。\n例如:\n假设有四个如下所示的文档:\n['Hello, how are you!',\n'Win money, win from home.',\n'Call me now',\n'Hello, Call you tomorrow?']\n我们的目标是将这组文本转换为频率分布矩阵,如下所示:\n\n从图中可以看出,文档在行中进行了编号,每个单词是一个列名称,相应的值是该单词在文档中出现的频率。\n我们详细讲解下,看看如何使用一小组文档进行转换。\n要处理这一步,我们将使用 sklearns \ncount vectorizer 方法,该方法的作用如下所示:\n它会令牌化字符串(将字符串划分为单个单词)并为每个令牌设定一个整型 ID。\n它会计算每个令牌的出现次数。\n 请注意: \nCountVectorizer 方法会自动将所有令牌化单词转换为小写形式,避免区分“He”和“he”等单词。为此,它会使用参数 lowercase,该参数默认设为 True。\n它还会忽略所有标点符号,避免区分后面有标点的单词(例如“hello!”)和前后没有标点的同一单词(例如“hello”)。为此,它会使用参数 token_pattern,该参数使用默认正则表达式选择具有 2 个或多个字母数字字符的令牌。\n要注意的第三个参数是 stop_words。停用词是指某个语言中最常用的字词,包括“am”、“an”、“and”、“the”等。 通过将此参数值设为 english,CountVectorizer 将自动忽略(输入文本中)出现在 scikit-learn 中的内置英语停用词列表中的所有单词。这非常有用,因为当我们尝试查找表明是垃圾内容的某些单词时,停用词会使我们的结论出现偏差。\n我们将在之后的步骤中深入讲解在模型中应用每种预处理技巧的效果,暂时先知道在处理文本数据时,有这些预处理技巧可采用。\n第 2.2 步:从头实现 Bag of Words\n在深入了解帮助我们处理繁重工作的 scikit-learn 的 Bag of Words(BoW) 库之前,首先我们自己实现该步骤,以便了解该库的背后原理。 \n 第 1 步:将所有字符串转换成小写形式。\n假设有一个文档集合:\nEnd of explanation\n'''\nSolution:\n'''\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\nlower_case_documents = []\nfor i in documents:\n low_doc = i.lower()\n lower_case_documents.append(low_doc)\nprint(lower_case_documents)\nExplanation: 说明:\n* 将文档集合中的所有字符串转换成小写形式。将它们保存到叫做“lower_case_documents”的列表中。你可以使用 lower() 方法在 python 中将字符串转换成小写形式。\nEnd of explanation\n'''\nSolution:\n'''\nsans_punctuation_documents = []\nimport string\nimport re\nfor i in lower_case_documents:\n punc = '[,.?!\\']' \n string = re.sub(punc, '', i)\n sans_punctuation_documents.append(string)\nprint(sans_punctuation_documents)\nExplanation: 第 2 步:删除所有标点符号 \n说明:\n删除文档集合中的字符串中的所有标点。将它们保存在叫做“sans_punctuation_documents”的列表中。\nEnd of explanation\n'''\nSolution:\n'''\npreprocessed_documents = []\nfor i in sans_punctuation_documents:\n preprocessed_documents.append(i.split(' '))\nprint(preprocessed_documents)\nExplanation: 第 3 步:令牌化 \n令牌化文档集合中的句子是指使用分隔符将句子拆分成单个单词。分隔符指定了我们将使用哪个字符来表示单词的开始和结束位置(例如,我们可以使用一个空格作为我们的文档集合的单词分隔符。)\n说明:\n使用 split() 方法令牌化“sans_punctuation_documents”中存储的字符串,并将最终文档集合存储在叫做“preprocessed_documents”的列表中。\nEnd of explanation\n'''\nSolution\n'''\nfrequency_list = []\nimport pprint\nfrom collections import Counter\nfor i in preprocessed_documents:\n dic = Counter(i) \n frequency_list.append(dic) \npprint.pprint(frequency_list)\nExplanation: 第 4 步:计算频率 \n我们已经获得所需格式的文档集合,现在可以数出每个单词在文档集合的每个文档中出现的次数了。为此,我们将使用 Python collections 库中的 Counter 方法。\nCounter 会数出列表中每项的出现次数,并返回一个字典,键是被数的项目,相应的值是该项目在列表中的计数。 \n说明:\n 使用 Counter() 方法和作为输入的 preprocessed_documents 创建一个字典,键是每个文档中的每个单词,相应的值是该单词的出现频率。将每个 Counter 字典当做项目另存到一个叫做“frequency_list”的列表中。\nEnd of explanation\n'''\nHere we will look to create a frequency matrix on a smaller document set to make sure we understand how the \ndocument-term matrix generation happens. We have created a sample document set 'documents'.\n'''\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\nExplanation: 恭喜!你从头实现了 Bag of Words 流程!正如在上一个输出中看到的,我们有一个频率分布字典,清晰地显示了我们正在处理的文本。\n我们现在应该充分理解 scikit-learn 中的 sklearn.feature_extraction.text.CountVectorizer 方法的背后原理了。\n我们将在下一步实现 sklearn.feature_extraction.text.CountVectorizer 方法。\n第 2.3 步:在 scikit-learn 中实现 Bag of Words\n我们已经从头实现了 BoW 概念,并使用 scikit-learn 以简洁的方式实现这一流程。我们将使用在上一步用到的相同文档集合。\nEnd of explanation\n'''\nSolution\n'''\nfrom sklearn.feature_extraction.text import CountVectorizer\ncount_vector = CountVectorizer()\nExplanation: 说明:\n 导入 sklearn.feature_extraction.text.CountVectorizer 方法并创建一个实例,命名为 'count_vector'。\nEnd of explanation\n`lowercase` 参数的默认值为 `True`,它会将所有文本都转换为小写形式。\nExplanation: 使用 CountVectorizer() 预处理数据 \n在第 2.2 步,我们从头实现了可以首先清理数据的 CountVectorizer() 方法。清理过程包括将所有数据转换为小写形式,并删除所有标点符号。CountVectorizer() 具有某些可以帮助我们完成这些步骤的参数,这些参数包括:\nlowercase = True\nEnd of explanation\n`token_pattern` 参数具有默认正则表达式值 `(?u)\\\\b\\\\w\\\\w+\\\\b`,它会忽略所有标点符号并将它们当做分隔符,并将长度大于等于 2 的字母数字字符串当做单个令牌或单词。\nExplanation: token_pattern = (?u)\\\\b\\\\w\\\\w+\\\\b\nEnd of explanation\n`stop_words` 参数如果设为 `english`,将从文档集合中删除与 scikit-learn 中定义的英语停用词列表匹配的所有单词。考虑到我们的数据集规模不大,并且我们处理的是信息,并不是电子邮件这样的更庞大文本来源,因此我们将不设置此参数值。\nExplanation: stop_words\nEnd of explanation\n'''\nPractice node:\nPrint the 'count_vector' object which is an instance of 'CountVectorizer()'\n'''\nprint(count_vector)\nExplanation: 你可以通过如下所示输出 count_vector 对象,查看该对象的所有参数值:\nEnd of explanation\n'''\nSolution:\n'''\ncount_vector.fit(documents)\ncount_vector.get_feature_names()\nExplanation: 说明:\n使用 fit() 将你的文档数据集与 CountVectorizer 对象进行拟合,并使用 get_feature_names() 方法获得被归类为特征的单词列表。\nEnd of explanation\n'''\nSolution\n'''\ndoc_array = count_vector.transform(documents).toarray()\ndoc_array\nExplanation: get_feature_names() 方法会返回此数据集的特征名称,即组成 'documents' 词汇表的单词集合。\n说明:\n创建一个矩阵,行是 4 个文档中每个文档的行,列是每个单词。对应的值(行,列)是该单词(在列中)在特定文档(在行中)中出现的频率。为此,你可以使用 transform() 方法并传入文档数据集作为参数。transform() 方法会返回一个 numpy 整数矩阵,你可以使用 toarray() 将其转换为数组,称之为 'doc_array'\nEnd of explanation\n'''\nSolution\n'''\nfrequency_matrix = pd.DataFrame(doc_array, columns = count_vector.get_feature_names())\nfrequency_matrix\nExplanation: 现在,对于单词在文档中的出现频率,我们已经获得了整洁的文档表示形式。为了方便理解,下一步我们会将此数组转换为数据帧,并相应地为列命名。\n说明:\n将我们获得并加载到 'doc_array' 中的数组转换为数据帧,并将列名设为单词名称(你之前使用 get_feature_names() 计算了名称)。将该数据帧命名为 'frequency_matrix'。\nEnd of explanation\n'''\nSolution\nNOTE: sklearn.cross_validation will be deprecated soon to sklearn.model_selection \n'''\n# split into training and testing sets\n# USE from sklearn.model_selection import train_test_split to avoid seeing deprecation warning.\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(df['sms_message'], \n df['label'], \n random_state=1)\nprint('Number of rows in the total set: {}'.format(df.shape[0]))\nprint('Number of rows in the training set: {}'.format(X_train.shape[0]))\nprint('Number of rows in the test set: {}'.format(X_test.shape[0]))\nExplanation: 恭喜!你为我们创建的文档数据集成功地实现了 Bag of Words 问题。\n直接使用该方法的一个潜在问题是如果我们的文本数据集非常庞大(假设有一大批新闻文章或电子邮件数据),由于语言本身的原因,肯定有某些值比其他值更常见。例如“is”、“the”、“an”等单词、代词、语法结构等会使矩阵出现偏斜并影响到分析结果。\n有几种方式可以减轻这种情况。一种方式是使用 stop_words 参数并将其值设为 english。这样会自动忽略 scikit-learn 中的内置英语停用词列表中出现的所有单词(来自输入文本)。\n另一种方式是使用 tfidf 方法。该方法已经超出了这门课程的讲解范畴。\n第 3.1 步:训练集和测试集\n我们已经知道如何处理 Bag of Words 问题,现在回到我们的数据集并继续我们的分析工作。第一步是将数据集拆分为训练集和测试集,以便稍后测试我们的模型。\n说明:\n通过在 sklearn 中使用 train_test_split 方法,将数据集拆分为训练集和测试集。使用以下变量拆分数据:\n* X_train 是 'sms_message' 列的训练数据。\n* y_train 是 'label' 列的训练数据\n* X_test 是 'sms_message' 列的测试数据。\n* y_test 是 'label' 列的测试数据。\n输出每个训练数据和测试数据的行数。\nEnd of explanation\n'''\n[Practice Node]\nThe code for this segment is in 2 parts. Firstly, we are learning a vocabulary dictionary for the training data \nand then transforming the data into a document-term matrix; secondly, for the testing data we are only \ntransforming the data into a document-term matrix.\nThis is similar to the process we followed in Step 2.3\nWe will provide the transformed data to students in the variables 'training_data' and 'testing_data'.\n'''\n'''\nSolution\n'''\n# Instantiate the CountVectorizer method\ncount_vector = CountVectorizer()\n# Fit the training data and then return the matrix\ntraining_data = count_vector.fit_transform(X_train)\n# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()\ntesting_data = count_vector.transform(X_test)\nExplanation: 第 3.2 步:对数据集应用 Bag of Words 流程。\n我们已经拆分了数据,下个目标是按照第 2 步:Bag of words 中的步骤操作,并将数据转换为期望的矩阵格式。为此,我们将像之前一样使用 CountVectorizer()。我们需要完成两步:\n首先,我们需要对 CountVectorizer()拟合训练数据 (X_train) 并返回矩阵。\n其次,我们需要转换测试数据 (X_test) 以返回矩阵。\n注意:X_train 是数据集中 'sms_message' 列的训练数据,我们将使用此数据训练模型。\nX_test 是 'sms_message' 列的测试数据,我们将使用该数据(转换为矩阵后)进行预测。然后在后面的步骤中将这些预测与 y_test 进行比较。\n我们暂时为你提供了进行矩阵转换的代码!\nEnd of explanation\n'''\nInstructions:\nCalculate probability of getting a positive test result, P(Pos)\n'''\n'''\nSolution (skeleton code will be provided)\n'''\n# P(D)\np_diabetes = 0.01\n# P(~D)\np_no_diabetes = 0.99\n# Sensitivity or P(Pos|D)\np_pos_diabetes = 0.9\n# Specificity or P(Neg|~D)\np_neg_no_diabetes = 0.9\n# P(Pos)\np_pos = p_diabetes * p_pos_diabetes + (p_no_diabetes *(1- p_neg_no_diabetes))\nprint('The probability of getting a positive test result P(Pos) is: {}',format(p_pos))\nExplanation: 第 4.1 步:从头实现贝叶斯定理\n我们的数据集已经是我们希望的格式,现在可以进行任务的下一步了,即研究用来做出预测并将信息分类为垃圾信息或非垃圾信息的算法。记得在该项目的开头,我们简要介绍了贝叶斯定理,现在我们将深入讲解该定理。通俗地说,贝叶斯定理根据与相关事件有关的其他事件的概率计算该事件的发生概率。它由先验概率(我们知道的概率或提供给我们的概率)和后验概率(我们希望用先验部分计算的概率)组成。\n我们用一个简单的示例从头实现贝叶斯定理。假设我们要根据某人接受糖尿病检测后获得阳性结果计算此人有糖尿病的概率。\n在医学领域,此类概率非常重要,因为它们涉及的是生死情况。 \n我们假设:\nP(D) 是某人患有糖尿病的概率。值为 0.01,换句话说,普通人群中有 1% 的人患有糖尿病(免责声明:这些值只是假设,并非任何医学研究的结论)。\nP(Pos):是获得阳性测试结果的概率。\nP(Neg):是获得阴性测试结果的概率。\nP(Pos|D):是本身有糖尿病并且获得阳性测试结果的概率,值为 0.9,换句话说,该测试在 90% 的情况下是正确的。亦称为敏感性或真正例率。\nP(Neg|~D):是本身没有糖尿病并且获得阴性测试结果的概率,值也为 0.9 ,因此在 90% 的情况下是正确的。亦称为特异性或真负例率。\n贝叶斯公式如下所示:\n\nP(A):A 独立发生的先验概率。在我们的示例中为 P(D),该值已经提供给我们了 。\nP(B):B 独立发生的先验概率。在我们的示例中为 P(Pos)。\nP(A|B):在给定 B 的情况下 A 发生的后验概率,在我们的示例中为 P(D|Pos),即某人的测试结果为阳性时患有糖尿病的概率。这是我们要计算的值。\nP(B|A):在给定 A 的情况下 B 可能发生的概率。在我们的示例中为 P(Pos|D),该值已经提供给我们了 。\n将这些值代入贝叶斯定理公式中:\nP(D|Pos) = P(D) * P(Pos|D) / P(Pos)\n获得阳性测试结果 P(Pos) 的概率可以使用敏感性和特异性来计算,如下所示:\nP(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity))]\nEnd of explanation\n'''\nInstructions:\nCompute the probability of an individual having diabetes, given that, that individual got a positive test result.\nIn other words, compute P(D|Pos).\nThe formula is: P(D|Pos) = (P(D) * P(Pos|D) / P(Pos)\n'''\n'''\nSolution\n'''\n# P(D|Pos)\np_diabetes_pos = (p_diabetes * p_pos_diabetes)/p_pos\nprint('Probability of an individual having diabetes, given that that individual got a positive test result is:\\\n',format(p_diabetes_pos)) \n'''\nInstructions:\nCompute the probability of an individual not having diabetes, given that, that individual got a positive test result.\nIn other words, compute P(~D|Pos).\nThe formula is: P(~D|Pos) = P(~D) * P(Pos|~D) / P(Pos)\nNote that P(Pos|~D) can be computed as 1 - P(Neg|~D). \nTherefore:\nP(Pos|~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1\n'''\n'''\nSolution\n'''\n# P(Pos|~D)\np_pos_no_diabetes = 0.1\n# P(~D|Pos)\np_no_diabetes_pos = (p_pos_no_diabetes * p_no_diabetes)/p_pos\nprint('Probability of an individual not having diabetes, given that that individual got a positive test result is:'\\\n,p_no_diabetes_pos)\nExplanation: 我们可以利用所有这些信息计算后验概率,如下所示:\n​ \n某人测试结果为阳性时患有糖尿病的概率为:\nP(D|Pos) = (P(D) * Sensitivity)) / P(Pos)\n某人测试结果为阳性时没有糖尿病的概率为:\nP(~D|Pos) = (P(~D) * (1-Specificity)) / P(Pos)\n后验概率的和将始终为 1。\nEnd of explanation\n根据上述公式和贝叶斯定理,我们可以进行以下计算:`P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`。在此等式中,`P(F,I)` 是在研究中提到“自由”和“移民”的概率。\nExplanation: 恭喜!你从头实现了贝叶斯定理。你的分析表明即使某人的测试结果为阳性,他/她也有 8.3% 的概率实际上患有糖尿病,以及 91.67% 的概率没有糖尿病。当然前提是全球只有 1% 的人群患有糖尿病,这只是个假设。\n “朴素贝叶斯”中的“朴素”一词是什么意思? \n朴素贝叶斯中的“朴素”一词实际上是指,算法在进行预测时使用的特征相互之间是独立的,但实际上并非始终这样。在我们的糖尿病示例中,我们只考虑了一个特征,即测试结果。假设我们添加了另一个特征“锻炼”。假设此特征具有二元值 0 和 1,0 表示某人一周的锻炼时间不超过 2 天,1 表示某人一周的锻炼时间超过 2 天。如果我们要同时使用这两个特征(即测试结果和“锻炼”特征的值)计算最终概率,贝叶斯定理将不可行。朴素贝叶斯是贝叶斯定理的一种延伸,假设所有特征相互之间是独立的。\n第 4.2 步:从头实现朴素贝叶斯\n你已经知道贝叶斯定理的详细原理,现在我们将用它来考虑有多个特征的情况。\n假设有两个政党的候选人,“Jill Stein”是绿党候选人,“Gary Johnson”是自由党的候选人,两位候选人在演讲中提到“自由”、“移民”和“环境”这些字眼的概率为:\nJill Stein 提到“自由”的概率:0.1 ---------> P(F|J)\nJill Stein 提到“移民”的概率:0.1 -----> P(I|J)\nJill Stein 提到“环境”的概率:0.8 -----> P(E|J)\nGary Johnson 提到“自由”的概率:0.7 -------> P(F|G)\nGary Johnson 提到“移民”的概率:0.2 ---> P(I|G)\nGary Johnson 提到“环境”的概率:0.1 ---> P(E|G)\n假设 Jill Stein 发表演讲的概率 P(J) 是 0.5,Gary Johnson 也是 P(G) = 0.5。\n了解这些信息后,如果我们要计算 Jill Stein 提到“自由”和“移民”的概率,该怎么做呢?这时候朴素贝叶斯定理就派上用场了,我们将考虑两个特征:“自由”和“移民”。\n现在我们可以定义朴素贝叶斯定理的公式:\n\n在该公式中,y 是分类变量,即候选人的姓名,x1 到 xn 是特征向量,即单个单词。该定理假设每个特征向量或单词 (xi) 相互之间是独立的。\n为了详细讲解该公式,我们需要计算以下后验概率:\nP(J|F,I):Jill Stein 提到“自由”和“移民”的概率。\nEnd of explanation\n根据上述公式,我们可以进行以下计算:`P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)`\n'''\nInstructions: Compute the probability of the words 'freedom' and 'immigration' being said in a speech, or\nP(F,I).\nThe first step is multiplying the probabilities of Jill Stein giving a speech with her individual \nprobabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text\nThe second step is multiplying the probabilities of Gary Johnson giving a speech with his individual \nprobabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text\nThe third step is to add both of these probabilities and you will get P(F,I).\n'''\n'''\nSolution: Step 1\n'''\n# P(J)\np_j = 0.5\n# P(F/J)\np_j_f = 0.1\n# P(I/J)\np_j_i = 0.1\np_j_text = p_j_f * p_j_i * p_j\nprint(p_j_text)\n'''\nSolution: Step 2\n'''\n# P(G)\np_g = 0.5\n# P(F/G)\np_g_f = 0.7\n# P(I/G)\np_g_i = 0.2\np_g_text = p_g_f * p_g_i * p_g;\nprint(p_g_text)\n'''\nSolution: Step 3: Compute P(F,I) and store in p_f_i\n'''\np_f_i = p_j_text + p_g_text\nprint('Probability of words freedom and immigration being said are: ', format(p_f_i))\nExplanation: P(G|F,I):Gary Johnson 提到“自由”和“移民”的概率。\nEnd of explanation\n'''\nInstructions:\nCompute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi\n'''\n'''\nSolution\n'''\np_j_fi = p_j_text / p_f_i\nprint('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi))\n'''\nInstructions:\nCompute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi\n'''\n'''\nSolution\n'''\np_g_fi = p_g_text/p_f_i\nprint('The probability of Gary Johnson saying the words Freedom and Immigration: ', format(p_g_fi))\nExplanation: 现在可以计算 P(J|F,I) 的概率,即 Jill Stein 提到“自由”和“移民”的概率,以及 P(G|F,I),即 Gary Johnson 提到“自由”和“移民”的概率。\nEnd of explanation\n'''\nInstructions:\nWe have loaded the training data into the variable 'training_data' and the testing data into the \nvariable 'testing_data'.\nImport the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier\n'naive_bayes'. You will be training the classifier using 'training_data' and y_train' from our split earlier. \n'''\n'''\nSolution\n'''\nfrom sklearn.naive_bayes import MultinomialNB\nnaive_bayes = MultinomialNB()\nnaive_bayes.fit(training_data,y_train)\n'''\nInstructions:\nNow that our algorithm has been trained using the training data set we can now make some predictions on the test data\nstored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.\n'''\n'''\nSolution\n'''\npredictions = naive_bayes.predict(testing_data)\nprint(predictions)\nExplanation: 可以看出,和贝叶斯定理一样,后验概率之和等于 1。恭喜!你从头实现了朴素贝叶斯定理。分析表明,绿党的 Jill Stein 在演讲中提到“自由”和“移民”的概率只有 6.6%,而自由党的 Gary Johnson 有 93.3% 的可能性会提到这两个词。\n另一个比较常见的朴素贝叶斯定理应用示例是在搜索引擎中搜索“萨克拉门托国王”。为了使我们能够获得与萨克拉门托国王队 NBA 篮球队相关的结果,搜索引擎需要将这两个单词关联到一起,而不是单独处理它们,否则就会获得标有“萨克拉门托”的图片(例如风光图片)以及关于“国王”的图片(可能是历史上的国王),而实际上我们想要搜索的是关于篮球队的图片。这是一种搜索引擎将单词当做非独立个体(因此采用的是“朴素”方式)的经典示例。 \n将此方法应用到我们的垃圾信息分类问题上,朴素贝叶斯算法会查看每个单词,而不是将它们当做有任何联系的关联体。对于垃圾内容检测器来说,这么做通常都可行,因为有些禁用词几乎肯定会被分类为垃圾内容,例如包含“伟哥”的电子邮件通常都被归类为垃圾邮件。\n第 5 步:使用 scikit-learn 实现朴素贝叶斯\n幸运的是,sklearn 具有多个朴素贝叶斯实现,这样我们就不用从头进行计算。我们将使用 sklearns 的 sklearn.naive_bayes 方法对我们的数据集做出预测。\n具体而言,我们将使用多项式朴素贝叶斯实现。这个分类器适合分类离散特征(例如我们的单词计数文本分类)。它会将整数单词计数作为输入。另一方面,高斯朴素贝叶斯更适合连续数据,因为它假设输入数据是高斯(正态)分布。\nEnd of explanation\n'''\nInstructions:\nCompute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions\nyou made earlier stored in the 'predictions' variable.\n'''\n'''\nSolution\n'''\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nprint('Accuracy score: ', format(accuracy_score(y_test,predictions)))\nprint('Precision score: ', format(precision_score(y_test,predictions)))\nprint('Recall score: ', format(recall_score(y_test,predictions)))\nprint('F1 score: ', format(f1_score(y_test,predictions)))\nExplanation: 我们已经对测试集进行预测,现在需要检查预测的准确率了。\n第 6 步:评估模型\n我们已经对测试集进行了预测,下一个目标是评估模型的效果。我们可以采用各种衡量指标,但首先快速总结下这些指标。\n 准确率 衡量的是分类器做出正确预测的概率,即正确预测的数量与预测总数(测试数据点的数量)之比。\n 精确率 指的是分类为垃圾信息的信息实际上是垃圾信息的概率,即真正例(分类为垃圾内容并且实际上是垃圾内容的单词)与所有正例(所有分类为垃圾内容的单词,无论是否分类正确)之比,换句话说,是以下公式的比值结果:\n[True Positives/(True Positives + False Positives)]\n 召回率(敏感性)表示实际上为垃圾信息并且被分类为垃圾信息的信息所占比例,即真正例(分类为垃圾内容并且实际上是垃圾内容的单词)与所有为垃圾内容的单词之比,换句话说,是以下公式的比值结果:\n[True Positives/(True Positives + False Negatives)]\n对于偏态分类分布问题(我们的数据集就属于偏态分类),例如如果有 100 条信息,只有 2 条是垃圾信息,剩下的 98 条不是,则准确率本身并不是很好的指标。我们将 90 条信息分类为垃圾信息(包括 2 条垃圾信息,但是我们将其分类为非垃圾信息,因此它们属于假负例),并将 10 条信息分类为垃圾信息(所有 10 条都是假正例),依然会获得比较高的准确率分数。对于此类情形,精确率和召回率非常实用。可以通过这两个指标获得 F1 分数,即精确率和召回率分数的加权平均值。该分数的范围是 0 到 1,1 表示最佳潜在 F1 分数。\n我们将使用所有四个指标确保我们的模型效果很好。这四个指标的值范围都在 0 到 1 之间,分数尽量接近 1 可以很好地表示模型的效果如何。\nEnd of explanation"}}},{"rowIdx":2182,"cells":{"Unnamed: 0":{"kind":"number","value":2182,"string":"2,182"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n
Bayesian Modeling for the Busy and the Confused - Part II
\n
Markov Chain Monte-Carlo
\nCurrently, the capacity to gather data is far ahead of the ability to generate meaningful insight using conventional approaches. Hopes of alleviating this bottleneck has come through the application of machine learning tools. Among these tools one that is increasingly garnering traction is probabilistic programming, particularly Bayesian modeling. In this paradigm, variables that are used to define models carry a probabilistic distribution rather than a scalar value. \"Fitting\" a model to data can then , simplistically, be construed as finding the appropriate parameterization for these distributions, given the model structure and the data. This offers a number of advantages over other methods, not the least of which is the estimation of uncertainty around model results. This in turn can better inform subsequent processes, such as decision-making, and/or scientific discovery.\n

\nThe present is the first of a two-notebook series, the subject of which is a brief, basic, but hands-on programmatic introduction to Bayesian modeling. This notebook contains an of a few key probability principles relevant to Bayesian inference. An illustration of how to put these in practice follows. In particular, I will explain one of the conmore intuitve approaches to Bayesian computation; Grid Approximation (GA). With this framework I will show how to create simple models that can be used to interpret and predict real world data.
\nGA is computationally intensive and runs into problems quickly when the data set is large and/or the model increases in complexity. One of the more popular solutions to this problem is the use of the Markov Chain Monte-Carlo (MCMC) algorithm. The implementation of MCMC in Bayesian models will be the subject of the second notebook of this series.\n
\nAs of this writing the most popular programming language in machine learning is Python. Python is an easy language to pickup\nStep1: Under the hood\nStep2: Timing MCMC\nStep3: \nStep4: \nStep5: What's going on?\nHighly autocorrelated trace\nStep6:
\n\n
\nStep7: Back to Contents\n\nTutorial Overview\nStep8: Regression coefficients easier to interpret with centered predictor\nStep9: $$ y = \\alpha + \\beta x_c$$
\n$\\rightarrow \\alpha=y$ when $x=\\bar{x}$
\n$\\rightarrow \\beta=\\Delta y$ when $x$ increases by one unit\nStep10: Back to Contents\n\nRegression\nStep11:
\n\n
\nBack to Contents\n\nRegression\nStep12:
\n\n\n \n \n \n \n \n \n \n \n\nBack to Contents\n\nRegression\nStep14:
\n\n
\nBack to Contents\n\nRegression\nStep15: model uncertainty\nStep16:
\n \n
\nprediction uncertainty"},"code_prompt":{"kind":"string","value":"Python Code:\nimport pickle\nimport warnings\nimport sys\nfrom IPython.display import Image, HTML\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import norm as gaussian, uniform\nimport pymc3 as pm\nfrom theano import shared\nimport seaborn as sb\nimport matplotlib.pyplot as pl\nfrom matplotlib import rcParams\nfrom matplotlib import ticker as mtick\nimport arviz as ar\nprint('Versions:')\nprint('---------')\nprint(f'python: {sys.version.split(\"|\")[0]}')\nprint(f'numpy: {np.__version__}')\nprint(f'pandas: {pd.__version__}')\nprint(f'seaborn: {sb.__version__}')\nprint(f'pymc3: {pm.__version__}')\nprint(f'arviz: {ar.__version__}')\n%matplotlib inline\nwarnings.filterwarnings('ignore', category=FutureWarning)\nExplanation:
Bayesian Modeling for the Busy and the Confused - Part II
\n
Markov Chain Monte-Carlo
\nCurrently, the capacity to gather data is far ahead of the ability to generate meaningful insight using conventional approaches. Hopes of alleviating this bottleneck has come through the application of machine learning tools. Among these tools one that is increasingly garnering traction is probabilistic programming, particularly Bayesian modeling. In this paradigm, variables that are used to define models carry a probabilistic distribution rather than a scalar value. \"Fitting\" a model to data can then , simplistically, be construed as finding the appropriate parameterization for these distributions, given the model structure and the data. This offers a number of advantages over other methods, not the least of which is the estimation of uncertainty around model results. This in turn can better inform subsequent processes, such as decision-making, and/or scientific discovery.\n

\nThe present is the first of a two-notebook series, the subject of which is a brief, basic, but hands-on programmatic introduction to Bayesian modeling. This notebook contains an of a few key probability principles relevant to Bayesian inference. An illustration of how to put these in practice follows. In particular, I will explain one of the conmore intuitve approaches to Bayesian computation; Grid Approximation (GA). With this framework I will show how to create simple models that can be used to interpret and predict real world data.
\nGA is computationally intensive and runs into problems quickly when the data set is large and/or the model increases in complexity. One of the more popular solutions to this problem is the use of the Markov Chain Monte-Carlo (MCMC) algorithm. The implementation of MCMC in Bayesian models will be the subject of the second notebook of this series.\n
\nAs of this writing the most popular programming language in machine learning is Python. Python is an easy language to pickup: pedagogical resources abound. Python is free, open source, and a large number of very useful libraries have been written over the years that have propelled it to its current place of prominence in a number of fields, in addition to machine learning.\n

\nI use Python (3.6+) code to illustrate the mechanics of Bayesian inference in lieu of lengthy explanations. I also use a number of dedicated Python libraries that shortens the code considerably. A solid understanding of Bayesian modeling cannot be spoon-fed and can only come from getting one's hands dirty.. Emphasis is therefore on readable reproducible code. This should ease the work the interested has to do to get some practice re-running the notebook and experimenting with some of the coding and Bayesian modeling patterns presented. Some know-how is required regarding installing and running a Python distribution, the required libraries, and jupyter notebooks; this is easily gleaned from the internet. A popular option in the machine learning community is Anaconda.\n\nNotebook Contents\nBasics: Joint probability, Inverse probability and Bayes' Theorem\nExample: Inferring the Statistical Distribution of Chlorophyll from Data\nGrid Approximation\nImpact of priors\nImpact of data set size\nMCMC\nPyMC3\nRegression\nData Preparation\nRegression in PyMC3\nChecking Priors\nModel Fitting\nFlavors of Uncertainty\n[Final Comments](#Conclusion\nEnd of explanation\ndef mcmc(data, μ_0=0.5, n_samples=1000,):\n print(f'{data.size} data points')\n data = data.reshape(1, -1)\n # set priors\n σ=0.75 # keep σ fixed for simplicity\n trace_μ = np.nan * np.ones(n_samples) # trace: where the sampler has been\n trace_μ[0] = μ_0 # start with a first guess\n for i in range(1, n_samples):\n proposed_μ = norm.rvs(loc=trace_μ[i-1], scale=0.1, size=1)\n prop_par_dict = dict(μ=proposed_μ, σ=σ)\n curr_par_dict = dict(μ=trace_μ[i-1], σ=σ)\n log_prob_prop = get_log_lik(data, prop_par_dict\n ) + get_log_prior(prop_par_dict)\n log_prob_curr = get_log_lik(data, curr_par_dict\n ) + get_log_prior(curr_par_dict) \n ratio = np.exp(log_prob_prop - log_prob_curr)\n if ratio > 1:\n # accept proposal\n trace_μ[i] = proposed_μ\n else:\n # evaluate low proba proposal\n if uniform.rvs(size=1, loc=0, scale=1) > ratio:\n # reject proposal\n trace_μ[i] = trace_μ[i-1] \n else:\n # accept proposal\n trace_μ[i] = proposed_μ\n return trace_μ\n def get_log_lik(data, param_dict):\n return np.sum(norm.logpdf(data, loc=param_dict['μ'],\n scale=param_dict['σ']\n ),\n axis=1)\ndef get_log_prior(par_dict, loc=1, scale=1):\n return norm.logpdf(par_dict['μ'], loc=loc, scale=scale)\nExplanation: Under the hood: Inferring chlorophyll distribution\n~~Grid approximation: computing probability everywhere~~\nMagical MCMC: Dealing with computational complexity\nProbabilistic Programming with PyMC3: Industrial grade MCMC\nBack to Contents\n\nMagical MCMC: Dealing with computational complexity\nGrid approximation:\nuseful for understanding mechanics of Bayesian computation\ncomputationally intensive\nimpractical and often intractable for large data sets or high-dimension models\nMCMC allows sampling where it probabilistically matters:\ncompute current probability given location in parameter space\npropose jump to new location in parameter space\ncompute new probability at proposed location\njump to new location if $\\frac{new\\ probability}{current\\ probability}>1$ \njump to new location if $\\frac{new\\ probability}{current\\ probability}>\\gamma\\in [0, 1]$\notherwise stay in current location\nEnd of explanation\n%%time\nmcmc_n_samples = 2000\ntrace1 = mcmc(data=df_data_s.chl_l.values, n_samples=mcmc_n_samples)\nf, ax = pl.subplots(nrows=2, figsize=(8, 8))\nax[0].plot(np.arange(mcmc_n_samples), trace1, marker='.',\n ls=':', color='k')\nax[0].set_title('trace of μ, 500 data points')\nax[1].set_title('μ marginal posterior')\npm.plots.kdeplot(trace1, ax=ax[1], label='mcmc',\n color='orange', lw=2, zorder=1)\nax[1].legend(loc='upper left')\nax[1].set_ylim(bottom=0)\ndf_μ = df_grid_3.groupby(['μ']).sum().drop('σ',\n axis=1)[['post_prob']\n ].reset_index()\nax2 = ax[1].twinx()\ndf_μ.plot(x='μ', y='post_prob', ax=ax2, color='k',\n label='grid',)\nax2.set_ylim(bottom=0);\nax2.legend(loc='upper right')\nf.tight_layout()\nf.savefig('./figJar/Presentation/mcmc_1.svg')\nExplanation: Timing MCMC\nEnd of explanation\n%%time\nsamples = 2000\ntrace2 = mcmc(data=df_data.chl_l.values, n_samples=samples)\nf, ax = pl.subplots(nrows=2, figsize=(8, 8))\nax[0].plot(np.arange(samples), trace2, marker='.',\n ls=':', color='k')\nax[0].set_title(f'trace of μ, {df_data.chl_l.size} data points')\nax[1].set_title('μ marginal posterior')\npm.plots.kdeplot(trace2, ax=ax[1], label='mcmc',\n color='orange', lw=2, zorder=1)\nax[1].legend(loc='upper left')\nax[1].set_ylim(bottom=0)\nf.tight_layout()\nf.savefig('./figJar/Presentation/mcmc_2.svg')\nExplanation: \nEnd of explanation\nf, ax = pl.subplots(ncols=2, figsize=(12, 5))\nax[0].stem(pm.autocorr(trace1[1500:]))\nax[1].stem(pm.autocorr(trace2[1500:]))\nax[0].set_title(f'{df_data_s.chl_l.size} data points')\nax[1].set_title(f'{df_data.chl_l.size} data points')\nf.suptitle('trace autocorrelation', fontsize=19)\nf.savefig('./figJar/Presentation/grid8.svg')\nf, ax = pl.subplots(nrows=2, figsize=(8, 8))\nthinned_trace = np.random.choice(trace2[100:], size=200, replace=False)\nax[0].plot(np.arange(200), thinned_trace, marker='.',\n ls=':', color='k')\nax[0].set_title('thinned trace of μ')\nax[1].set_title('μ marginal posterior')\npm.plots.kdeplot(thinned_trace, ax=ax[1], label='mcmc',\n color='orange', lw=2, zorder=1)\nax[1].legend(loc='upper left')\nax[1].set_ylim(bottom=0)\nf.tight_layout()\nf.savefig('./figJar/Presentation/grid9.svg')\nf, ax = pl.subplots()\nax.stem(pm.autocorr(thinned_trace[:20]));\nf.savefig('./figJar/Presentation/stem2.svg', dpi=300, format='svg');\nExplanation: \nEnd of explanation\nwith pm.Model() as m1:\n μ_ = pm.Normal('μ', mu=1, sd=1)\n σ = pm.Uniform('σ', lower=0, upper=2)\n lkl = pm.Normal('likelihood', mu=μ_, sd=σ,\n observed=df_data.chl_l.dropna())\ngraph_m1 = pm.model_to_graphviz(m1)\ngraph_m1.format = 'svg'\ngraph_m1.render('./figJar/Presentation/graph_m1');\nExplanation: What's going on?\nHighly autocorrelated trace:
\n$\\rightarrow$ inadequate parameter space exploration
\n$\\rightarrow$ poor convergence...\nMetropolis MCMC
\n $\\rightarrow$ easy to implement + memory efficient
\n $\\rightarrow$ inefficient parameter space exploration
\n $\\rightarrow$ better MCMC sampler?\nHamiltonian Monte Carlo (HMC)\nGreatly improved convergence\nWell mixed traces are a signature and an easy diagnostic\nHMC does require a lot of tuning,\nNot practical for the inexperienced applied statistician or scientist\nNo-U-Turn Sampler (NUTS), HMC that automates most tuning steps\nNUTS scales well to complex problems with many parameters (1000's)\nImplemented in popular libraries\nProbabilistic modeling for the beginner\nUnder the hood: Inferring chlorophyll distribution\n~~Grid approximation: computing probability everywhere~~\n~~MCMC: how it works~~\nProbabilistic Programming with PyMC3: Industrial grade MCMC \nBack to Contents\n \nProbabilistic Programming with PyMC3\nrelatively simple syntax\neasily used in conjuction with mainstream python scientific data structures
\n $\\rightarrow$numpy arrays
\n $\\rightarrow$pandas dataframes\nmodels of reasonable complexity span ~10-20 lines.\nEnd of explanation\nwith m1:\n trace_m1 = pm.sample(2000, tune=1000, chains=4)\npm.traceplot(trace_m1);\nar.plot_posterior(trace_m1, kind='hist', round_to=2);\nExplanation:
\n\n
\nEnd of explanation\ndf_data.head().T\ndf_data['Gr-MxBl'] = -1 * df_data['MxBl-Gr']\nExplanation: Back to Contents\n\nTutorial Overview:\nProbabilistic modeling for the beginner
\n $\\rightarrow$~~The basics~~
\n $\\rightarrow$~~Starting easy: inferring chlorophyll~~
\n $\\rightarrow$Regression: adding a predictor to estimate chlorophyll\nBack to Contents\n\nRegression: Adding a predictor to estimate chlorophyll\nData preparation\nWriting a regression model in PyMC3\nAre my priors making sense?\nModel fitting\nFlavors of uncertainty\nLinear regression takes the form\n$$ y = \\alpha + \\beta x $$\nwhere \n $$\\ \\ \\ \\ \\ y = log_{10}(chl)$$ and $$x = log_{10}\\left(\\frac{Gr}{MxBl}\\right)$$\nEnd of explanation\ndf_data['Gr-MxBl_c'] = df_data['Gr-MxBl'] - df_data['Gr-MxBl'].mean()\ndf_data[['Gr-MxBl_c', 'chl_l']].info()\nx_c = df_data.dropna()['Gr-MxBl_c'].values\ny = df_data.dropna().chl_l.values\nExplanation: Regression coefficients easier to interpret with centered predictor:

\n$$x_c = x - \\bar{x}$$\nEnd of explanation\ng3 = sb.PairGrid(df_data.loc[:, ['Gr-MxBl_c', 'chl_l']], height=3,\n diag_sharey=False,)\ng3.map_diag(sb.kdeplot, color='k')\ng3.map_offdiag(sb.scatterplot, color='k');\nmake_lower_triangle(g3)\nf = pl.gcf()\naxs = f.get_axes()\nxlabel = r'$log_{10}\\left(\\frac{Rrs_{green}}{max(Rrs_{blue})}\\right), centered$'\nylabel = r'$log_{10}(chl)$'\naxs[0].set_xlabel(xlabel)\naxs[2].set_xlabel(xlabel)\naxs[2].set_ylabel(ylabel)\naxs[3].set_xlabel(ylabel)\nf.tight_layout()\nf.savefig('./figJar/Presentation/pairwise_1.png')\nExplanation: $$ y = \\alpha + \\beta x_c$$
\n$\\rightarrow \\alpha=y$ when $x=\\bar{x}$
\n$\\rightarrow \\beta=\\Delta y$ when $x$ increases by one unit\nEnd of explanation\nwith pm.Model() as m_vague_prior:\n # priors\n σ = pm.Uniform('σ', lower=0, upper=2)\n α = pm.Normal('α', mu=0, sd=1)\n β = pm.Normal('β', mu=0, sd=1)\n # deterministic model\n μ = α + β * x_c\n # likelihood\n chl_i = pm.Normal('chl_i', mu=μ, sd=σ, observed=y)\nExplanation: Back to Contents\n\nRegression: Adding a predictor to estimate chlorophyll\n~~Data preparation~~\nWriting a regression model in PyMC3\nAre my priors making sense?\nModel fitting\nFlavors of uncertainty\nEnd of explanation\nvague_priors = pm.sample_prior_predictive(samples=500, model=m_vague_prior, vars=['α', 'β',])\nx_dummy = np.linspace(-1.5, 1.5, num=50).reshape(-1, 1)\nα_prior_vague = vague_priors['α'].reshape(1, -1)\nβ_prior_vague = vague_priors['β'].reshape(1, -1)\nchl_l_prior_μ_vague = α_prior_vague + β_prior_vague * x_dummy\nf, ax = pl.subplots( figsize=(6, 5))\nax.plot(x_dummy, chl_l_prior_μ_vague, color='k', alpha=0.1,);\nax.set_xlabel(r'$log_{10}\\left(\\frac{green}{max(blue)}\\right)$, centered')\nax.set_ylabel('$log_{10}(chl)$')\nax.set_title('Vague priors')\nax.set_ylim(-3.5, 3.5)\nf.tight_layout(pad=1)\nf.savefig('./figJar/Presentation/prior_checks_1.png')\nExplanation:
\n\n
\nBack to Contents\n\nRegression: Adding a predictor to estimate chlorophyll\n~~Data preparation~~\n~~Writing a regression model in PyMC3~~\nAre my priors making sense?\nModel fitting \nFlavors of uncertainty\nEnd of explanation\nwith pm.Model() as m_informative_prior:\n α = pm.Normal('α', mu=0, sd=0.2)\n β = pm.Normal('β', mu=0, sd=0.5)\n σ = pm.Uniform('σ', lower=0, upper=2)\n μ = α + β * x_c\n chl_i = pm.Normal('chl_i', mu=μ, sd=σ, observed=y)\nprior_info = pm.sample_prior_predictive(model=m_informative_prior, vars=['α', 'β'])\nα_prior_info = prior_info['α'].reshape(1, -1)\nβ_prior_info = prior_info['β'].reshape(1, -1)\nchl_l_prior_info = α_prior_info + β_prior_info * x_dummy\nf, ax = pl.subplots( figsize=(6, 5))\nax.plot(x_dummy, chl_l_prior_info, color='k', alpha=0.1,);\nax.set_xlabel(r'$log_{10}\\left(\\frac{green}{max(blue}\\right)$, centered')\nax.set_ylabel('$log_{10}(chl)$')\nax.set_title('Weakly informative priors')\nax.set_ylim(-3.5, 3.5)\nf.tight_layout(pad=1)\nf.savefig('./figJar/Presentation/prior_checks_2.png')\nExplanation:
\n\n\n \n \n \n \n \n \n \n \n\nBack to Contents\n\nRegression: Adding a predictor to estimate chlorophyll\n~~Data preparatrion~~\n~~Writing a regression model in PyMC3~~\n~~Are my priors making sense?~~\nModel fitting\nFlavors of uncertainty\nEnd of explanation\nα_posterior = trace_inf.get_values('α').reshape(1, -1)\nβ_posterior = trace_inf.get_values('β').reshape(1, -1)\nσ_posterior = trace_inf.get_values('σ').reshape(1, -1)\nExplanation:
\n\n
\nBack to Contents\n\nRegression: Adding a predictor to estimate chlorophyll\n~~Data preparation~~\n~~Writing a regression model in PyMC3~~\n~~Are my priors making sense?~~\n~~Data review and model fitting~~\nFlavors of uncertainty\nTwo types of uncertainties:\n1. model uncertainty\n2. prediction uncertainty\nEnd of explanation\nμ_posterior = α_posterior + β_posterior * x_dummy\npl.plot(x_dummy, μ_posterior[:, ::16], color='k', alpha=0.1);\npl.plot(x_dummy, μ_posterior[:, 1], color='k', label='model mean')\npl.scatter(x_c, y, color='orange', edgecolor='k', alpha=0.5, label='obs'); pl.legend();\npl.ylim(-2.5, 2.5); pl.xlim(-1, 1);\npl.xlabel(r'$log_{10}\\left(\\frac{Gr}{max(Blue)}\\right)$')\npl.ylabel(r'$log_{10}(chlorophyll)$')\nf = pl.gcf()\nf.savefig('./figJar/Presentation/mu_posterior.svg')\nExplanation: model uncertainty: uncertainty around the model mean\nEnd of explanation\nppc = norm.rvs(loc=μ_posterior, scale=σ_posterior);\nci_94_perc = pm.hpd(ppc.T, alpha=0.06);\npl.scatter(x_c, y, color='orange', edgecolor='k', alpha=0.5, label='obs'); pl.legend();\npl.plot(x_dummy, ppc.mean(axis=1), color='k', label='mean prediction');\npl.fill_between(x_dummy.flatten(), ci_94_perc[:, 0], ci_94_perc[:, 1], alpha=0.5, color='k',\n label='94% credibility interval:\\n94% chance that prediction\\nwill be in here!');\npl.xlim(-1, 1); pl.ylim(-2.5, 2.5)\npl.legend(fontsize=12, loc='upper left')\nf = pl.gcf()\nf.savefig('./figJar/Presentation/ppc.svg')\nExplanation:
\n \n
\nprediction uncertainty: posterior predictive checks\nEnd of explanation"}}},{"rowIdx":2183,"cells":{"Unnamed: 0":{"kind":"number","value":2183,"string":"2,183"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2018 D. Koehn, notebook style sheet by L.A. Barba, N.C. Clementi\nStep1: Mesh generation by Transfinite Interpolation applied to the sea dike problem\nWe have implemented and tested our mesh generation approach using Transfinite Interpolation (TFI) in the previous lesson. Now, let's apply it to the problem of the sea dike with strong topography.\nRevisiting the sea dike problem\nTo generate a deformed quad mesh incorporating the strong topography of the sea dike, we only have to describe the topography by a parametrized curve. We can roughly describe it by the following equations\nStep2: Unfortunately, the TFI is defined on the unit square, so we have to normalize the sea dike topography, before applying the TFI.\nStep3: OK, now we have the normalized dike topography on a unit square, so we can define the parametric curve for the topography.\nStep4: No error so far. Before plotting the generated mesh, we have to unnormalize the spatial coordinates."},"code_prompt":{"kind":"string","value":"Python Code:\n# Execute this cell to load the notebook's style sheet, then ignore it\nfrom IPython.core.display import HTML\ncss_file = '../style/custom.css'\nHTML(open(css_file, \"r\").read())\nExplanation: Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2018 D. Koehn, notebook style sheet by L.A. Barba, N.C. Clementi\nEnd of explanation\n# Import Libraries\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Here, I introduce a new library, which is useful \n# to define the fonts and size of a figure in a notebook\nfrom pylab import rcParams\n# Get rid of a Matplotlib deprecation warning\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n# Define number of grid points in x-direction and spatial vectors\nNXtopo = 100\nx_dike = np.linspace(0.0, 61.465, num=NXtopo)\nz_dike = np.zeros(NXtopo)\n# calculate dike topograpy\ndef dike_topo(x_dike, z_dike, NX1):\n \n for i in range(NX1):\n if(x_dike[i]<4.0):\n z_dike[i] = 0.0\n \n if(x_dike[i]>=4.0 and x_dike[i]<18.5):\n z_dike[i] = (x_dike[i]-4) * 6.76/14.5\n \n if(x_dike[i]>=18.5 and x_dike[i]<22.5):\n z_dike[i] = 6.76\n \n if(x_dike[i]>=22.5 and x_dike[i] unnormalized x-coordinate x_d\n x_d = xmax_dike * s\n z_d = 0.0\n \n if(x_d<4.0):\n z_d = 0.0\n \n if(x_d>=4.0 and x_d<18.5):\n z_d = (x_d-4) * 6.76/14.5\n \n if(x_d>=18.5 and x_d<22.5):\n z_d = 6.76\n \n if(x_d>=22.5 and x_d normalized z-coordinate z\n z = z_d / zmax_dike + 1\n xzt = [x,z]\n \n return xzt\n# ... left boundary\ndef Xl(s):\n \n x = 0.0\n z = s \n xzl = [x,z]\n \n return xzl\n# ... right boundary\ndef Xr(s):\n \n x = 1\n z = s\n \n xzr = [x,z]\n \n return xzr\n# Transfinite interpolation\n# Discretize along xi and eta axis\nxi = np.linspace(0.0, 1.0, num=NX)\neta = np.linspace(0.0, 1.0, num=NZ)\nxi1, eta1 = np.meshgrid(xi, eta)\n# Intialize matrices for x and z axis\nX = np.zeros((NX,NZ))\nZ = np.zeros((NX,NZ))\n# loop over cells\nfor i in range(NX):\n Xi = xi[i]\n for j in range(NZ):\n Eta = eta[j]\n \n xb = Xb(Xi)\n xb0 = Xb(0)\n xb1 = Xb(1)\n \n xt = Xt(Xi)\n xt0 = Xt(0)\n xt1 = Xt(1)\n \n xl = Xl(Eta)\n xr = Xr(Eta)\n # Transfinite Interpolation (Gordon-Hall algorithm)\n X[i,j] = (1-Eta) * xb[0] + Eta * xt[0] + (1-Xi) * xl[0] + Xi * xr[0] \\\n - (Xi * Eta * xt1[0] + Xi * (1-Eta) * xb1[0] + Eta * (1-Xi) * xt0[0] \\\n + (1-Xi) * (1-Eta) * xb0[0])\n \n Z[i,j] = (1-Eta) * xb[1] + Eta * xt[1] + (1-Xi) * xl[1] + Xi * xr[1] \\\n - (Xi * Eta * xt1[1] + Xi * (1-Eta) * xb1[1] + Eta * (1-Xi) * xt0[1] \\\n + (1-Xi) * (1-Eta) * xb0[1]) \nExplanation: OK, now we have the normalized dike topography on a unit square, so we can define the parametric curve for the topography.\nEnd of explanation\n# Unnormalize the mesh \nX = X * xmax_dike\nZ = Z * zmax_dike\n# Plot TFI mesh (physical domain)\nplt.plot(X, Z, 'k')\nplt.plot(X.T, Z.T, 'k')\nplt.title(\"Sea dike TFI grid (physical domain)\" )\nplt.xlabel(\"x [m]\")\nplt.ylabel(\"z [m]\")\nplt.axes().set_aspect('equal')\nplt.savefig('sea_dike_TFI.pdf', bbox_inches='tight', format='pdf')\nplt.show()\nExplanation: No error so far. Before plotting the generated mesh, we have to unnormalize the spatial coordinates.\nEnd of explanation"}}},{"rowIdx":2184,"cells":{"Unnamed: 0":{"kind":"number","value":2184,"string":"2,184"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Does age correlate with motion?\nThis has been bothering me for so many of our slack chats that I felt I really needed to start here.\nWhat do we know about motion in our sample!?\nStep1: Get the data\nStep2: Motion measures\nThere are two measures we care about that can be used to index motion\nStep3: We can see from the plot above that we have a data set of people who do not move all that much and that these two measures correlate well for low motion scans but start to diverge for the scans that have higher motion.\nSo, back to our original question, does motion correlate with age?\nStep4: Yes! It does, and you can see that this correlation is stronger for func_perc_fd. I don't think this is really particularly important and I suspect it is driven by the kurtosis of the distribution. The func_mean_fd distribution is more non-normal (less normal?) than the func_perc_fd and I wonder if this causing the correlation to look messier. To be honest, I don't know and I don't really care. If this is what makes a big difference to our results I'll start to care more ;)\nBut hang on, we all know that it's important to look at the data so lets make a plot\nStep5: Well. That's underinspiring. Does that really count as a significant correlation? Gun to your head would you put that line there?\nHow does this correlation change when we look at different subsets of data? Specifically different age ranges, motion thresholds and sample sizes.\nHow does sample size affect the relationship between age and motion?\nThe following plots show how sample size affects the relationship between age and motion (pearson's r).\nI've kept the whole age range (6-18) and I'll show the same plot for 3 different motion thresholds (5%, 15%, 50% bad frames) and for a range of different sample sizes (25, 50, 75, 100, 125 and 150 participants each).\nStep6: What I take from this plot is that there is a negative correlation between age and head motion (the older you are the less you move) and that the more participants we have in a sample the more consistent the measure (the narrower the box)\nAs John has said multiple times\nStep7: What I take from this plot is that the correlation with age is less strong when you are more stringent in your exclusion criteria. Which makes sense\nStep8: Woah - that's interesting. In this sample we seem to only be able to detect a movement relationship for a 5 year age range (remember that the upper and lower limits are inclusive) when the participants are either 10-14 or 12-16 years old!\nIs this pattern related to the threshold? What if we change that?\nStep9: So, this to me is the crazy bit that I need to get my head around"},"code_prompt":{"kind":"string","value":"Python Code:\nimport matplotlib.pylab as plt\n%matplotlib inline\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nsns.set_style('white')\nsns.set_context('notebook')\nfrom scipy.stats import kurtosis\nimport sys\n%load_ext autoreload\n%autoreload 2\nsys.path.append('../SCRIPTS/')\nimport kidsmotion_stats as kms\nimport kidsmotion_datamanagement as kmdm\nimport kidsmotion_plotting as kmp\nExplanation: Does age correlate with motion?\nThis has been bothering me for so many of our slack chats that I felt I really needed to start here.\nWhat do we know about motion in our sample!?\nEnd of explanation\nbehav_data_f = '../Phenotypic_V1_0b_preprocessed1.csv'\nbehav_df = kmdm.read_in_behavdata(behav_data_f)\nExplanation: Get the data\nEnd of explanation\nfig, ax_list = kmp.histogram_motion(behav_df)\n# Note that there is a warning here but don't worry about it :P\nExplanation: Motion measures\nThere are two measures we care about that can be used to index motion:\nfunc_mean_fd: mean framewise displacement, measured in mm\nfunc_perc_fd: percentage of frames that were more than 0.2mm displaced from the previous frame.\nEnd of explanation\nfor var in ['func_mean_fd', 'func_perc_fd']:\n print(var)\n print(' kurtosis = {:2.1f}'.format(kurtosis(behav_df[var])))\n print(' corr with age:')\n kms.report_correlation(behav_df, 'AGE_AT_SCAN', var, covar_name=None, r_dp=2)\nExplanation: We can see from the plot above that we have a data set of people who do not move all that much and that these two measures correlate well for low motion scans but start to diverge for the scans that have higher motion.\nSo, back to our original question, does motion correlate with age?\nEnd of explanation\nfig, ax_list = kmp.corr_motion_age(behav_df, fit_reg=False)\nfig, ax_list = kmp.corr_motion_age(behav_df)\nExplanation: Yes! It does, and you can see that this correlation is stronger for func_perc_fd. I don't think this is really particularly important and I suspect it is driven by the kurtosis of the distribution. The func_mean_fd distribution is more non-normal (less normal?) than the func_perc_fd and I wonder if this causing the correlation to look messier. To be honest, I don't know and I don't really care. If this is what makes a big difference to our results I'll start to care more ;)\nBut hang on, we all know that it's important to look at the data so lets make a plot:\nEnd of explanation\nage_l = 6\nage_u = 18\nmotion_measure='func_perc_fd'\nn_perms = 100\nmotion_thresh = 50\ncorr_age_df = pd.DataFrame()\nfor n in [ 25, 50, 75, 100, 125, 150 ]:\n filtered_df = kmdm.filter_data(behav_df, motion_thresh, age_l, age_u, motion_measure=motion_measure)\n r_list = []\n for i in range(n_perms):\n sample_df = kmdm.select_random_sample(filtered_df, n=n)\n r, p = kms.calculate_correlation(sample_df, 'AGE_AT_SCAN', motion_measure, covar_name=None)\n r_list+=[r]\n corr_age_df['N{:2.0f}'.format(n)] = r_list\nfig, ax = kmp.compare_groups_boxplots(corr_age_df, title='Thr: {:1.0f}%'.format(motion_thresh))\nExplanation: Well. That's underinspiring. Does that really count as a significant correlation? Gun to your head would you put that line there?\nHow does this correlation change when we look at different subsets of data? Specifically different age ranges, motion thresholds and sample sizes.\nHow does sample size affect the relationship between age and motion?\nThe following plots show how sample size affects the relationship between age and motion (pearson's r).\nI've kept the whole age range (6-18) and I'll show the same plot for 3 different motion thresholds (5%, 15%, 50% bad frames) and for a range of different sample sizes (25, 50, 75, 100, 125 and 150 participants each).\nEnd of explanation\nage_l = 6\nage_u = 18\nmotion_measure='func_perc_fd'\nn = 100\nn_perms = 100\ncorr_age_df = pd.DataFrame()\nfor motion_thresh in [ 5, 10, 25, 50 ]:\n filtered_df = kmdm.filter_data(behav_df, motion_thresh, age_l, age_u, motion_measure=motion_measure)\n r_list = []\n for i in range(n_perms):\n sample_df = kmdm.select_random_sample(filtered_df, n=n)\n r, p = kms.calculate_correlation(sample_df, 'AGE_AT_SCAN', motion_measure, covar_name=None)\n r_list+=[r]\n corr_age_df['Thr{:1.0f}'.format(motion_thresh)] = r_list\nfig, ax = kmp.compare_groups_boxplots(corr_age_df, title='N: {:1.0f}'.format(n))\nExplanation: What I take from this plot is that there is a negative correlation between age and head motion (the older you are the less you move) and that the more participants we have in a sample the more consistent the measure (the narrower the box)\nAs John has said multiple times: the fact that more people gives you a better estimate of the population is kinda known already :P\nSo now we move to look at how the different thresholds affect this correlation...\nHow does the motion cut off affect the relationship between age and motion?\nEnd of explanation\nmotion_measure='func_perc_fd'\nn = 100\nn_perms = 100\nmotion_thresh = 25\ncorr_age_df = pd.DataFrame()\nfor age_l in [ 6, 8, 10, 12, 14 ]:\n age_u = age_l + 4\n filtered_df = kmdm.filter_data(behav_df, motion_thresh, age_l, age_u, motion_measure=motion_measure)\n r_list = []\n for i in range(n_perms):\n sample_df = kmdm.select_random_sample(filtered_df, n=n)\n r, p = kms.calculate_correlation(sample_df, 'AGE_AT_SCAN', motion_measure, covar_name=None)\n r_list+=[r]\n corr_age_df['{:1.0f} to {:1.0f}'.format(age_l, age_u)] = r_list\nfig, ax = kmp.compare_groups_boxplots(corr_age_df, title='N: {:1.0f}; Thr: {:1.0f}%'.format(n, motion_thresh))\nExplanation: What I take from this plot is that the correlation with age is less strong when you are more stringent in your exclusion criteria. Which makes sense: we're more likely to remove younger people and therefore reduce the correlation with age.\nNext on the list is age range, do we see the same pattern across different ages?\nHow does the age range of our cohort affect the relationship between age and motion?\nEnd of explanation\nmotion_measure='func_perc_fd'\nn = 100\nn_perms = 100\nmotion_thresh = 25\nfor motion_thresh in [ 5, 10, 25, 50 ]:\n corr_age_df = pd.DataFrame()\n for age_l in [ 6, 8, 10, 12, 14 ]:\n age_u = age_l + 4\n filtered_df = kmdm.filter_data(behav_df, motion_thresh, age_l, age_u, motion_measure=motion_measure)\n r_list = []\n for i in range(n_perms):\n sample_df = kmdm.select_random_sample(filtered_df, n=n)\n r, p = kms.calculate_correlation(sample_df, 'AGE_AT_SCAN', motion_measure, covar_name=None)\n r_list+=[r]\n corr_age_df['{:1.0f} to {:1.0f}'.format(age_l, age_u)] = r_list\n fig, ax = kmp.compare_groups_boxplots(corr_age_df, title='N: {:1.0f}; Thr: {:1.0f}%'.format(n, motion_thresh))\nExplanation: Woah - that's interesting. In this sample we seem to only be able to detect a movement relationship for a 5 year age range (remember that the upper and lower limits are inclusive) when the participants are either 10-14 or 12-16 years old!\nIs this pattern related to the threshold? What if we change that?\nEnd of explanation\nmotion_measure='func_perc_fd'\nn = 30\nn_perms = 100\nmotion_thresh = 25\nfor motion_thresh in [ 5, 10, 25, 50 ]:\n corr_age_df = pd.DataFrame()\n for age_l in [ 6, 8, 10, 12, 14 ]:\n age_u = age_l + 4\n filtered_df = kmdm.filter_data(behav_df, motion_thresh, age_l, age_u, motion_measure=motion_measure)\n r_list = []\n for i in range(n_perms):\n sample_df = kmdm.select_random_sample(filtered_df, n=n)\n r, p = kms.calculate_correlation(sample_df, 'AGE_AT_SCAN', motion_measure, covar_name=None)\n r_list+=[r]\n corr_age_df['{:1.0f} to {:1.0f}'.format(age_l, age_u)] = r_list\n fig, ax = kmp.compare_groups_boxplots(corr_age_df, title='N: {:1.0f}; Thr: {:1.0f}%'.format(n, motion_thresh))\nExplanation: So, this to me is the crazy bit that I need to get my head around: there's different relationships with age for different thresholds. Which means, I think, that any of our results will change according to the thresholds we apply.\nNow, I also want to see if we get the same pattern with a smaller number of participants in our cohort (you can see that we have fewer than 100 people in the very youngest group).\nEnd of explanation"}}},{"rowIdx":2185,"cells":{"Unnamed: 0":{"kind":"number","value":2185,"string":"2,185"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n What about writing SVG inside a cell in IPython or Jupyter\nStep1: let's create a very simple SVG file\nStep2: Now let's create a Svg Scene based\ninspired from Isendrak Skatasmid code at "},"code_prompt":{"kind":"string","value":"Python Code:\n%config InlineBackend.figure_format = 'svg'\nurl_svg = 'http://clipartist.net/social/clipartist.net/B/base_tux_g_v_linux.svg'\nfrom IPython.display import SVG, display, HTML\n# testing svg inside jupyter next one does not support width parameter at the time of writing\n#display(SVG(url=url_svg))\ndisplay(HTML(''))\nExplanation: What about writing SVG inside a cell in IPython or Jupyter\nEnd of explanation\n%%writefile basic_circle.svg\n\n \n\nurl_svg = 'basic_circle.svg'\nHTML('')\nExplanation: let's create a very simple SVG file\nEnd of explanation\nclass SvgScene:\n \n def __init__(self,name=\"svg\",height=500,width=500):\n self.name = name\n self.items = []\n self.height = height\n self.width = width\n return\n def add(self,item): self.items.append(item)\n def strarray(self):\n var = [\"\\n\",\n \"\\n\" % (self.height,self.width),\n \" \\n\"]\n for item in self.items: var += item.strarray() \n var += [\" \\n\\n\"]\n return var\n def write_svg(self,filename=None):\n if filename:\n self.svgname = filename\n else:\n self.svgname = self.name + \".svg\"\n file = open(self.svgname,'w')\n file.writelines(self.strarray())\n file.close()\n return\n def display(self):\n url_svg = self.svgname \n display(HTML(''))\n return\nclass Line:\n def __init__(self,start,end,color,width):\n self.start = start\n self.end = end\n self.color = color\n self.width = width\n return\n def strarray(self):\n return [\" \\n\" %\\\n (self.start[0],self.start[1],self.end[0],self.end[1],colorstr(self.color),self.width)]\nclass Circle:\n def __init__(self,center,radius,fill_color,line_color,line_width):\n self.center = center\n self.radius = radius\n self.fill_color = fill_color\n self.line_color = line_color\n self.line_width = line_width\n return\n def strarray(self):\n return [\" \\n\" % (colorstr(self.fill_color),colorstr(self.line_color),self.line_width)]\nclass Ellipse:\n def __init__(self,center,radius_x,radius_y,fill_color,line_color,line_width):\n self.center = center\n self.radiusx = radius_x\n self.radiusy = radius_y\n self.fill_color = fill_color\n self.line_color = line_color\n self.line_width = line_width\n def strarray(self):\n return [\" \\n\" % (colorstr(self.fill_color),colorstr(self.line_color),self.line_width)]\nclass Polygon:\n def __init__(self,points,fill_color,line_color,line_width):\n self.points = points\n self.fill_color = fill_color\n self.line_color = line_color\n self.line_width = line_width\n def strarray(self):\n polygon=\"\\n\" %\\\n (colorstr(self.fill_color),colorstr(self.line_color),self.line_width)]\nclass Rectangle:\n def __init__(self,origin,height,width,fill_color,line_color,line_width):\n self.origin = origin\n self.height = height\n self.width = width\n self.fill_color = fill_color\n self.line_color = line_color\n self.line_width = line_width\n return\n def strarray(self):\n return [\" \\n\" %\\\n (self.width,colorstr(self.fill_color),colorstr(self.line_color),self.line_width)]\nclass Text:\n def __init__(self,origin,text,size,color):\n self.origin = origin\n self.text = text\n self.size = size\n self.color = color\n return\n def strarray(self):\n return [\" \\n\" %\\\n (self.origin[0],self.origin[1],self.size,colorstr(self.color)),\n \" %s\\n\" % self.text,\n \" \\n\"]\ndef colorstr(rgb): return \"#%x%x%x\" % (rgb[0]/16,rgb[1]/16,rgb[2]/16)\nscene = SvgScene(\"test\",300,300)\nscene.add(Rectangle((100,100),200,200,(0,255,255),(0,0,0),1))\nscene.add(Line((200,200),(200,300),(0,0,0),1))\nscene.add(Line((200,200),(300,200),(0,0,0),1))\nscene.add(Line((200,200),(100,200),(0,0,0),1))\nscene.add(Line((200,200),(200,100),(0,0,0),1))\nscene.add(Circle((200,200),30,(0,0,255),(0,0,0),1))\nscene.add(Circle((200,300),30,(0,255,0),(0,0,0),1))\nscene.add(Circle((300,200),30,(255,0,0),(0,0,0),1))\nscene.add(Circle((100,200),30,(255,255,0),(0,0,0),1))\nscene.add(Circle((200,100),30,(255,0,255),(0,0,0),1))\nscene.add(Text((50,50),\"Testing SVG 1\",24,(0,0,0)))\nscene.write_svg()\nscene.display()\nExplanation: Now let's create a Svg Scene based\ninspired from Isendrak Skatasmid code at :\nhttp://code.activestate.com/recipes/578123-draw-svg-images-in-python-python-recipe-enhanced-v/\nEnd of explanation"}}},{"rowIdx":2186,"cells":{"Unnamed: 0":{"kind":"number","value":2186,"string":"2,186"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Vertex client library\nStep1: Install the latest GA version of google-cloud-storage library as well.\nStep2: Restart the kernel\nOnce you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages.\nStep3: Before you begin\nGPU runtime\nMake sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\nMake sure that billing is enabled for your project.\nEnable the Vertex APIs and Compute Engine APIs.\nThe Google Cloud SDK is already installed in Google Cloud Notebook.\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\nNote\nStep4: Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.\nAmericas\nStep5: Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.\nStep6: Authenticate your Google Cloud account\nIf you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps\nStep7: Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants\nImport Vertex client library\nImport the Vertex client library into our Python environment.\nStep8: Vertex constants\nSetup up the following constants for Vertex\nStep9: AutoML constants\nSet constants unique to AutoML datasets and training\nStep10: Hardware Accelerators\nSet the hardware accelerators (e.g., GPU), if any, for prediction.\nSet the variable DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify\nStep11: Container (Docker) image\nFor AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected.\nMachine Type\nNext, set the machine type to use for prediction.\nSet the variable DEPLOY_COMPUTE to configure the compute resources for the VM you will use for prediction.\nmachine type\nn1-standard\nStep12: Tutorial\nNow you are ready to start creating your own AutoML tabular binary classification model.\nSet up clients\nThe Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.\nYou will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.\nDataset Service for Dataset resources.\nModel Service for Model resources.\nPipeline Service for training.\nEndpoint Service for deployment.\nPrediction Service for serving.\nStep13: Dataset\nNow that your clients are ready, your first step is to create a Dataset resource instance. This step differs from Vision, Video and Language. For those products, after the Dataset resource is created, one then separately imports the data, using the import_data method.\nFor tabular, importing of the data is deferred until the training pipeline starts training the model. What do we do different? Well, first you won't be calling the import_data method. Instead, when you create the dataset instance you specify the Cloud Storage location of the CSV file or BigQuery location of the data table, which contains your tabular data as part of the Dataset resource's metadata.\nCloud Storage\nmetadata = {\"input_config\"\nStep14: Quick peek at your data\nYou will use a version of the Bank Marketing dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.\nYou also need for training to know the heading name of the label column, which is save as label_column. For this dataset, it is the last column in the CSV file.\nStep15: Dataset\nNow that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.\nCreate Dataset resource instance\nUse the helper function create_dataset to create the instance of a Dataset resource. This function does the following\nStep16: Now save the unique dataset identifier for the Dataset resource instance you created.\nStep17: Train the model\nNow train an AutoML tabular binary classification model using your Vertex Dataset resource. To train the model, do the following steps\nStep18: Construct the task requirements\nNext, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the task field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the json_format.ParseDict method for the conversion.\nThe minimal fields you need to specify are\nStep19: Now save the unique identifier of the training pipeline you created.\nStep20: Get information on a training pipeline\nNow get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter\nStep21: Deployment\nTraining the above model may take upwards of 30 minutes time.\nOnce your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_deploy.name.\nStep22: Model information\nNow that your model is trained, you can get some information on your model.\nEvaluate the Model resource\nNow find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model.\nList evaluations for all slices\nUse this helper function list_model_evaluations, which takes the following parameter\nStep23: Deploy the Model resource\nNow deploy the trained Vertex Model resource you created with AutoML. This requires two steps\nStep24: Now get the unique identifier for the Endpoint resource you created.\nStep25: Compute instance scaling\nYou have several choices on scaling the compute instances for handling your online prediction requests\nStep26: Deploy Model resource to the Endpoint resource\nUse this helper function deploy_model to deploy the Model resource to the Endpoint resource you created for serving predictions, with the following parameters\nStep27: Make a online prediction request\nNow do a online prediction to your deployed model.\nMake test item\nYou will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.\nStep28: Make a prediction\nNow you have a test item. Use this helper function predict_item, which takes the following parameters\nStep29: Undeploy the Model resource\nNow undeploy your Model resource from the serving Endpoint resoure. Use this helper function undeploy_model, which takes the following parameters\nStep30: Cleaning up\nTo clean up all GCP resources used in this project, you can delete the GCP\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial"},"code_prompt":{"kind":"string","value":"Python Code:\nimport os\nimport sys\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n! pip3 install -U google-cloud-aiplatform $USER_FLAG\nExplanation: Vertex client library: AutoML tabular binary classification model for online prediction\n\n \n \n
\n \n \"Colab Run in Colab\n \n \n \n \"GitHub\n View on GitHub\n \n
\n


\nOverview\nThis tutorial demonstrates how to use the Vertex client library for Python to create tabular binary classification models and do online prediction using Google Cloud's AutoML.\nDataset\nThe dataset used for this tutorial is the Bank Marketing. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.\nObjective\nIn this tutorial, you create an AutoML tabular binary classification model and deploy for online prediction from a Python script using the Vertex client library. You can alternatively create and deploy models using the gcloud command-line tool or online using the Google Cloud Console.\nThe steps performed include:\nCreate a Vertex Dataset resource.\nTrain the model.\nView the model evaluation.\nDeploy the Model resource to a serving Endpoint resource.\nMake a prediction.\nUndeploy the Model.\nCosts\nThis tutorial uses billable components of Google Cloud (GCP):\nVertex AI\nCloud Storage\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nInstallation\nInstall the latest version of Vertex client library.\nEnd of explanation\n! pip3 install -U google-cloud-storage $USER_FLAG\nExplanation: Install the latest GA version of google-cloud-storage library as well.\nEnd of explanation\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)\nExplanation: Restart the kernel\nOnce you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages.\nEnd of explanation\nPROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n! gcloud config set project $PROJECT_ID\nExplanation: Before you begin\nGPU runtime\nMake sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\nMake sure that billing is enabled for your project.\nEnable the Vertex APIs and Compute Engine APIs.\nThe Google Cloud SDK is already installed in Google Cloud Notebook.\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.\nEnd of explanation\nREGION = \"us-central1\" # @param {type: \"string\"}\nExplanation: Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\nYou may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the Vertex locations documentation\nEnd of explanation\nfrom datetime import datetime\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")\nExplanation: Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.\nEnd of explanation\n# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n google_auth.authenticate_user()\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''\nExplanation: Authenticate your Google Cloud account\nIf you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select Vertex Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.\nEnd of explanation\nimport time\nfrom google.cloud.aiplatform import gapic as aip\nfrom google.protobuf import json_format\nfrom google.protobuf.json_format import MessageToJson, ParseDict\nfrom google.protobuf.struct_pb2 import Struct, Value\nExplanation: Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants\nImport Vertex client library\nImport the Vertex client library into our Python environment.\nEnd of explanation\n# API service endpoint\nAPI_ENDPOINT = \"{}-aiplatform.googleapis.com\".format(REGION)\n# Vertex location root path for your dataset, model and endpoint resources\nPARENT = \"projects/\" + PROJECT_ID + \"/locations/\" + REGION\nExplanation: Vertex constants\nSetup up the following constants for Vertex:\nAPI_ENDPOINT: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.\nPARENT: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.\nEnd of explanation\n# Tabular Dataset type\nDATA_SCHEMA = \"gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml\"\n# Tabular Labeling type\nLABEL_SCHEMA = (\n \"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml\"\n)\n# Tabular Training task\nTRAINING_SCHEMA = \"gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml\"\nExplanation: AutoML constants\nSet constants unique to AutoML datasets and training:\nDataset Schemas: Tells the Dataset resource service which type of dataset it is.\nData Labeling (Annotations) Schemas: Tells the Dataset resource service how the data is labeled (annotated).\nDataset Training Schemas: Tells the Pipeline resource service the task (e.g., classification) to train the model for.\nEnd of explanation\nif os.getenv(\"IS_TESTING_DEPOLY_GPU\"):\n DEPLOY_GPU, DEPLOY_NGPU = (\n aip.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_DEPOLY_GPU\")),\n )\nelse:\n DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)\nExplanation: Hardware Accelerators\nSet the hardware accelerators (e.g., GPU), if any, for prediction.\nSet the variable DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:\n(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)\nFor GPU, available accelerators include:\n - aip.AcceleratorType.NVIDIA_TESLA_K80\n - aip.AcceleratorType.NVIDIA_TESLA_P100\n - aip.AcceleratorType.NVIDIA_TESLA_P4\n - aip.AcceleratorType.NVIDIA_TESLA_T4\n - aip.AcceleratorType.NVIDIA_TESLA_V100\nOtherwise specify (None, None) to use a container image to run on a CPU.\nEnd of explanation\nif os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\nVCPU = \"4\"\nDEPLOY_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Deploy machine type\", DEPLOY_COMPUTE)\nExplanation: Container (Docker) image\nFor AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected.\nMachine Type\nNext, set the machine type to use for prediction.\nSet the variable DEPLOY_COMPUTE to configure the compute resources for the VM you will use for prediction.\nmachine type\nn1-standard: 3.75GB of memory per vCPU.\nn1-highmem: 6.5GB of memory per vCPU\nn1-highcpu: 0.9 GB of memory per vCPU\nvCPUs: number of [2, 4, 8, 16, 32, 64, 96 ]\nNote: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs\nEnd of explanation\n# client options same for all services\nclient_options = {\"api_endpoint\": API_ENDPOINT}\ndef create_dataset_client():\n client = aip.DatasetServiceClient(client_options=client_options)\n return client\ndef create_model_client():\n client = aip.ModelServiceClient(client_options=client_options)\n return client\ndef create_pipeline_client():\n client = aip.PipelineServiceClient(client_options=client_options)\n return client\ndef create_endpoint_client():\n client = aip.EndpointServiceClient(client_options=client_options)\n return client\ndef create_prediction_client():\n client = aip.PredictionServiceClient(client_options=client_options)\n return client\nclients = {}\nclients[\"dataset\"] = create_dataset_client()\nclients[\"model\"] = create_model_client()\nclients[\"pipeline\"] = create_pipeline_client()\nclients[\"endpoint\"] = create_endpoint_client()\nclients[\"prediction\"] = create_prediction_client()\nfor client in clients.items():\n print(client)\nExplanation: Tutorial\nNow you are ready to start creating your own AutoML tabular binary classification model.\nSet up clients\nThe Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.\nYou will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.\nDataset Service for Dataset resources.\nModel Service for Model resources.\nPipeline Service for training.\nEndpoint Service for deployment.\nPrediction Service for serving.\nEnd of explanation\nIMPORT_FILE = \"gs://cloud-ml-tables-data/bank-marketing.csv\"\nExplanation: Dataset\nNow that your clients are ready, your first step is to create a Dataset resource instance. This step differs from Vision, Video and Language. For those products, after the Dataset resource is created, one then separately imports the data, using the import_data method.\nFor tabular, importing of the data is deferred until the training pipeline starts training the model. What do we do different? Well, first you won't be calling the import_data method. Instead, when you create the dataset instance you specify the Cloud Storage location of the CSV file or BigQuery location of the data table, which contains your tabular data as part of the Dataset resource's metadata.\nCloud Storage\nmetadata = {\"input_config\": {\"gcs_source\": {\"uri\": [gcs_uri]}}}\nThe format for a Cloud Storage path is:\ngs://[bucket_name]/[folder(s)/[file]\nBigQuery\nmetadata = {\"input_config\": {\"bigquery_source\": {\"uri\": [gcs_uri]}}}\nThe format for a BigQuery path is:\nbq://[collection].[dataset].[table]\nNote that the uri field is a list, whereby you can input multiple CSV files or BigQuery tables when your data is split across files.\nData preparation\nThe Vertex Dataset resource for tabular has a couple of requirements for your tabular data.\nMust be in a CSV file or a BigQuery query.\nCSV\nFor tabular binary classification, the CSV file has a few requirements:\nThe first row must be the heading -- note how this is different from Vision, Video and Language where the requirement is no heading.\nAll but one column are features.\nOne column is the label, which you will specify when you subsequently create the training pipeline.\nLocation of Cloud Storage training data.\nNow set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.\nEnd of explanation\ncount = ! gsutil cat $IMPORT_FILE | wc -l\nprint(\"Number of Examples\", int(count[0]))\nprint(\"First 10 rows\")\n! gsutil cat $IMPORT_FILE | head\nheading = ! gsutil cat $IMPORT_FILE | head -n1\nlabel_column = str(heading).split(\",\")[-1].split(\"'\")[0]\nprint(\"Label Column Name\", label_column)\nif label_column is None:\n raise Exception(\"label column missing\")\nExplanation: Quick peek at your data\nYou will use a version of the Bank Marketing dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.\nYou also need for training to know the heading name of the label column, which is save as label_column. For this dataset, it is the last column in the CSV file.\nEnd of explanation\nTIMEOUT = 90\ndef create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT):\n start_time = time.time()\n try:\n if src_uri.startswith(\"gs://\"):\n metadata = {\"input_config\": {\"gcs_source\": {\"uri\": [src_uri]}}}\n elif src_uri.startswith(\"bq://\"):\n metadata = {\"input_config\": {\"bigquery_source\": {\"uri\": [src_uri]}}}\n dataset = aip.Dataset(\n display_name=name,\n metadata_schema_uri=schema,\n labels=labels,\n metadata=json_format.ParseDict(metadata, Value()),\n )\n operation = clients[\"dataset\"].create_dataset(parent=PARENT, dataset=dataset)\n print(\"Long running operation:\", operation.operation.name)\n result = operation.result(timeout=TIMEOUT)\n print(\"time:\", time.time() - start_time)\n print(\"response\")\n print(\" name:\", result.name)\n print(\" display_name:\", result.display_name)\n print(\" metadata_schema_uri:\", result.metadata_schema_uri)\n print(\" metadata:\", dict(result.metadata))\n print(\" create_time:\", result.create_time)\n print(\" update_time:\", result.update_time)\n print(\" etag:\", result.etag)\n print(\" labels:\", dict(result.labels))\n return result\n except Exception as e:\n print(\"exception:\", e)\n return None\nresult = create_dataset(\"bank-\" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE)\nExplanation: Dataset\nNow that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.\nCreate Dataset resource instance\nUse the helper function create_dataset to create the instance of a Dataset resource. This function does the following:\nUses the dataset client service.\nCreates an Vertex Dataset resource (aip.Dataset), with the following parameters:\ndisplay_name: The human-readable name you choose to give it.\nmetadata_schema_uri: The schema for the dataset type.\nmetadata: The Cloud Storage or BigQuery location of the tabular data.\nCalls the client dataset service method create_dataset, with the following parameters:\nparent: The Vertex location root path for your Database, Model and Endpoint resources.\ndataset: The Vertex dataset object instance you created.\nThe method returns an operation object.\nAn operation object is how Vertex handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning.\nYou can use the operation object to get status on the operation (e.g., create Dataset resource) or to cancel the operation, by invoking an operation method:\n| Method | Description |\n| ----------- | ----------- |\n| result() | Waits for the operation to complete and returns a result object in JSON format. |\n| running() | Returns True/False on whether the operation is still running. |\n| done() | Returns True/False on whether the operation is completed. |\n| canceled() | Returns True/False on whether the operation was canceled. |\n| cancel() | Cancels the operation (this may take up to 30 seconds). |\nEnd of explanation\n# The full unique ID for the dataset\ndataset_id = result.name\n# The short numeric ID for the dataset\ndataset_short_id = dataset_id.split(\"/\")[-1]\nprint(dataset_id)\nExplanation: Now save the unique dataset identifier for the Dataset resource instance you created.\nEnd of explanation\ndef create_pipeline(pipeline_name, model_name, dataset, schema, task):\n dataset_id = dataset.split(\"/\")[-1]\n input_config = {\n \"dataset_id\": dataset_id,\n \"fraction_split\": {\n \"training_fraction\": 0.8,\n \"validation_fraction\": 0.1,\n \"test_fraction\": 0.1,\n },\n }\n training_pipeline = {\n \"display_name\": pipeline_name,\n \"training_task_definition\": schema,\n \"training_task_inputs\": task,\n \"input_data_config\": input_config,\n \"model_to_upload\": {\"display_name\": model_name},\n }\n try:\n pipeline = clients[\"pipeline\"].create_training_pipeline(\n parent=PARENT, training_pipeline=training_pipeline\n )\n print(pipeline)\n except Exception as e:\n print(\"exception:\", e)\n return None\n return pipeline\nExplanation: Train the model\nNow train an AutoML tabular binary classification model using your Vertex Dataset resource. To train the model, do the following steps:\nCreate an Vertex training pipeline for the Dataset resource.\nExecute the pipeline to start the training.\nCreate a training pipeline\nYou may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:\nBeing reusable for subsequent training jobs.\nCan be containerized and ran as a batch job.\nCan be distributed.\nAll the steps are associated with the same pipeline job for tracking progress.\nUse this helper function create_pipeline, which takes the following parameters:\npipeline_name: A human readable name for the pipeline job.\nmodel_name: A human readable name for the model.\ndataset: The Vertex fully qualified dataset identifier.\nschema: The dataset labeling (annotation) training schema.\ntask: A dictionary describing the requirements for the training job.\nThe helper function calls the Pipeline client service'smethod create_pipeline, which takes the following parameters:\nparent: The Vertex location root path for your Dataset, Model and Endpoint resources.\ntraining_pipeline: the full specification for the pipeline training job.\nLet's look now deeper into the minimal requirements for constructing a training_pipeline specification:\ndisplay_name: A human readable name for the pipeline job.\ntraining_task_definition: The dataset labeling (annotation) training schema.\ntraining_task_inputs: A dictionary describing the requirements for the training job.\nmodel_to_upload: A human readable name for the model.\ninput_data_config: The dataset specification.\ndataset_id: The Vertex dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier.\nfraction_split: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML.\nEnd of explanation\nTRANSFORMATIONS = [\n {\"auto\": {\"column_name\": \"Age\"}},\n {\"auto\": {\"column_name\": \"Job\"}},\n {\"auto\": {\"column_name\": \"MaritalStatus\"}},\n {\"auto\": {\"column_name\": \"Education\"}},\n {\"auto\": {\"column_name\": \"Default\"}},\n {\"auto\": {\"column_name\": \"Balance\"}},\n {\"auto\": {\"column_name\": \"Housing\"}},\n {\"auto\": {\"column_name\": \"Loan\"}},\n {\"auto\": {\"column_name\": \"Contact\"}},\n {\"auto\": {\"column_name\": \"Day\"}},\n {\"auto\": {\"column_name\": \"Month\"}},\n {\"auto\": {\"column_name\": \"Duration\"}},\n {\"auto\": {\"column_name\": \"Campaign\"}},\n {\"auto\": {\"column_name\": \"PDays\"}},\n {\"auto\": {\"column_name\": \"POutcome\"}},\n]\nPIPE_NAME = \"bank_pipe-\" + TIMESTAMP\nMODEL_NAME = \"bank_model-\" + TIMESTAMP\ntask = Value(\n struct_value=Struct(\n fields={\n \"target_column\": Value(string_value=label_column),\n \"prediction_type\": Value(string_value=\"classification\"),\n \"train_budget_milli_node_hours\": Value(number_value=1000),\n \"disable_early_stopping\": Value(bool_value=False),\n \"transformations\": json_format.ParseDict(TRANSFORMATIONS, Value()),\n }\n )\n)\nresponse = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)\nExplanation: Construct the task requirements\nNext, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the task field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the json_format.ParseDict method for the conversion.\nThe minimal fields you need to specify are:\nprediction_type: Whether we are doing \"classification\" or \"regression\".\ntarget_column: The CSV heading column name for the column we want to predict (i.e., the label).\ntrain_budget_milli_node_hours: The maximum time to budget (billed) for training the model, where 1000 = 1 hour.\ndisable_early_stopping: Whether True/False to let AutoML use its judgement to stop training early or train for the entire budget.\ntransformations: Specifies the feature engineering for each feature column.\nFor transformations, the list must have an entry for each column. The outer key field indicates the type of feature engineering for the corresponding column. In this tutorial, you set it to \"auto\" to tell AutoML to automatically determine it.\nFinally, create the pipeline by calling the helper function create_pipeline, which returns an instance of a training pipeline object.\nEnd of explanation\n# The full unique ID for the pipeline\npipeline_id = response.name\n# The short numeric ID for the pipeline\npipeline_short_id = pipeline_id.split(\"/\")[-1]\nprint(pipeline_id)\nExplanation: Now save the unique identifier of the training pipeline you created.\nEnd of explanation\ndef get_training_pipeline(name, silent=False):\n response = clients[\"pipeline\"].get_training_pipeline(name=name)\n if silent:\n return response\n print(\"pipeline\")\n print(\" name:\", response.name)\n print(\" display_name:\", response.display_name)\n print(\" state:\", response.state)\n print(\" training_task_definition:\", response.training_task_definition)\n print(\" training_task_inputs:\", dict(response.training_task_inputs))\n print(\" create_time:\", response.create_time)\n print(\" start_time:\", response.start_time)\n print(\" end_time:\", response.end_time)\n print(\" update_time:\", response.update_time)\n print(\" labels:\", dict(response.labels))\n return response\nresponse = get_training_pipeline(pipeline_id)\nExplanation: Get information on a training pipeline\nNow get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter:\nname: The Vertex fully qualified pipeline identifier.\nWhen the model is done training, the pipeline state will be PIPELINE_STATE_SUCCEEDED.\nEnd of explanation\nwhile True:\n response = get_training_pipeline(pipeline_id, True)\n if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:\n print(\"Training job has not completed:\", response.state)\n model_to_deploy_id = None\n if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:\n raise Exception(\"Training Job Failed\")\n else:\n model_to_deploy = response.model_to_upload\n model_to_deploy_id = model_to_deploy.name\n print(\"Training Time:\", response.end_time - response.start_time)\n break\n time.sleep(60)\nprint(\"model to deploy:\", model_to_deploy_id)\nExplanation: Deployment\nTraining the above model may take upwards of 30 minutes time.\nOnce your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_deploy.name.\nEnd of explanation\ndef list_model_evaluations(name):\n response = clients[\"model\"].list_model_evaluations(parent=name)\n for evaluation in response:\n print(\"model_evaluation\")\n print(\" name:\", evaluation.name)\n print(\" metrics_schema_uri:\", evaluation.metrics_schema_uri)\n metrics = json_format.MessageToDict(evaluation._pb.metrics)\n for metric in metrics.keys():\n print(metric)\n print(\"logloss\", metrics[\"logLoss\"])\n print(\"auPrc\", metrics[\"auPrc\"])\n return evaluation.name\nlast_evaluation = list_model_evaluations(model_to_deploy_id)\nExplanation: Model information\nNow that your model is trained, you can get some information on your model.\nEvaluate the Model resource\nNow find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model.\nList evaluations for all slices\nUse this helper function list_model_evaluations, which takes the following parameter:\nname: The Vertex fully qualified model identifier for the Model resource.\nThis helper function uses the model client service's list_model_evaluations method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric.\nFor each evaluation (you probably only have one) we then print all the key names for each metric in the evaluation, and for a small set (logLoss and auPrc) you will print the result.\nEnd of explanation\nENDPOINT_NAME = \"bank_endpoint-\" + TIMESTAMP\ndef create_endpoint(display_name):\n endpoint = {\"display_name\": display_name}\n response = clients[\"endpoint\"].create_endpoint(parent=PARENT, endpoint=endpoint)\n print(\"Long running operation:\", response.operation.name)\n result = response.result(timeout=300)\n print(\"result\")\n print(\" name:\", result.name)\n print(\" display_name:\", result.display_name)\n print(\" description:\", result.description)\n print(\" labels:\", result.labels)\n print(\" create_time:\", result.create_time)\n print(\" update_time:\", result.update_time)\n return result\nresult = create_endpoint(ENDPOINT_NAME)\nExplanation: Deploy the Model resource\nNow deploy the trained Vertex Model resource you created with AutoML. This requires two steps:\nCreate an Endpoint resource for deploying the Model resource to.\nDeploy the Model resource to the Endpoint resource.\nCreate an Endpoint resource\nUse this helper function create_endpoint to create an endpoint to deploy the model to for serving predictions, with the following parameter:\ndisplay_name: A human readable name for the Endpoint resource.\nThe helper function uses the endpoint client service's create_endpoint method, which takes the following parameter:\ndisplay_name: A human readable name for the Endpoint resource.\nCreating an Endpoint resource returns a long running operation, since it may take a few moments to provision the Endpoint resource for serving. You call response.result(), which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the Endpoint resource: response.name.\nEnd of explanation\n# The full unique ID for the endpoint\nendpoint_id = result.name\n# The short numeric ID for the endpoint\nendpoint_short_id = endpoint_id.split(\"/\")[-1]\nprint(endpoint_id)\nExplanation: Now get the unique identifier for the Endpoint resource you created.\nEnd of explanation\nMIN_NODES = 1\nMAX_NODES = 1\nExplanation: Compute instance scaling\nYou have several choices on scaling the compute instances for handling your online prediction requests:\nSingle Instance: The online prediction requests are processed on a single compute instance.\nSet the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to one.\nManual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.\nSet the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.\nAuto Scaling: The online prediction requests are split across a scaleable number of compute instances.\nSet the minimum (MIN_NODES) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions.\nThe minimum number of compute instances corresponds to the field min_replica_count and the maximum number of compute instances corresponds to the field max_replica_count, in your subsequent deployment request.\nEnd of explanation\nDEPLOYED_NAME = \"bank_deployed-\" + TIMESTAMP\ndef deploy_model(\n model, deployed_model_display_name, endpoint, traffic_split={\"0\": 100}\n):\n if DEPLOY_GPU:\n machine_spec = {\n \"machine_type\": DEPLOY_COMPUTE,\n \"accelerator_type\": DEPLOY_GPU,\n \"accelerator_count\": DEPLOY_NGPU,\n }\n else:\n machine_spec = {\n \"machine_type\": DEPLOY_COMPUTE,\n \"accelerator_count\": 0,\n }\n deployed_model = {\n \"model\": model,\n \"display_name\": deployed_model_display_name,\n \"dedicated_resources\": {\n \"min_replica_count\": MIN_NODES,\n \"max_replica_count\": MAX_NODES,\n \"machine_spec\": machine_spec,\n },\n \"disable_container_logging\": False,\n }\n response = clients[\"endpoint\"].deploy_model(\n endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split\n )\n print(\"Long running operation:\", response.operation.name)\n result = response.result()\n print(\"result\")\n deployed_model = result.deployed_model\n print(\" deployed_model\")\n print(\" id:\", deployed_model.id)\n print(\" model:\", deployed_model.model)\n print(\" display_name:\", deployed_model.display_name)\n print(\" create_time:\", deployed_model.create_time)\n return deployed_model.id\ndeployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)\nExplanation: Deploy Model resource to the Endpoint resource\nUse this helper function deploy_model to deploy the Model resource to the Endpoint resource you created for serving predictions, with the following parameters:\nmodel: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.\ndeploy_model_display_name: A human readable name for the deployed model.\nendpoint: The Vertex fully qualified endpoint identifier to deploy the model to.\nThe helper function calls the Endpoint client service's method deploy_model, which takes the following parameters:\nendpoint: The Vertex fully qualified Endpoint resource identifier to deploy the Model resource to.\ndeployed_model: The requirements specification for deploying the model.\ntraffic_split: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.\nIf only one model, then specify as { \"0\": 100 }, where \"0\" refers to this model being uploaded and 100 means 100% of the traffic.\nIf there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { \"0\": percent, model_id: percent, ... }, where model_id is the model id of an existing model to the deployed endpoint. The percents must add up to 100.\nLet's now dive deeper into the deployed_model parameter. This parameter is specified as a Python dictionary with the minimum required fields:\nmodel: The Vertex fully qualified model identifier of the (upload) model to deploy.\ndisplay_name: A human readable name for the deployed model.\ndisable_container_logging: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.\ndedicated_resources: This refers to how many compute instances (replicas) that are scaled for serving prediction requests.\nmachine_spec: The compute instance to provision. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated.\nmin_replica_count: The number of compute instances to initially provision, which you set earlier as the variable MIN_NODES.\nmax_replica_count: The maximum number of compute instances to scale to, which you set earlier as the variable MAX_NODES.\nTraffic Split\nLet's now dive deeper into the traffic_split parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.\nWhy would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only get's say 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision.\nResponse\nThe method returns a long running operation response. We will wait sychronously for the operation to complete by calling the response.result(), which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.\nEnd of explanation\nINSTANCE = {\n \"Age\": \"58\",\n \"Job\": \"managment\",\n \"MaritalStatus\": \"married\",\n \"Education\": \"teritary\",\n \"Default\": \"no\",\n \"Balance\": \"2143\",\n \"Housing\": \"yes\",\n \"Loan\": \"no\",\n \"Contact\": \"unknown\",\n \"Day\": \"5\",\n \"Month\": \"may\",\n \"Duration\": \"261\",\n \"Campaign\": \"1\",\n \"PDays\": \"-1\",\n \"Previous\": 0,\n \"POutcome\": \"unknown\",\n}\nExplanation: Make a online prediction request\nNow do a online prediction to your deployed model.\nMake test item\nYou will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.\nEnd of explanation\ndef predict_item(data, endpoint, parameters_dict):\n parameters = json_format.ParseDict(parameters_dict, Value())\n # The format of each instance should conform to the deployed model's prediction input schema.\n instances_list = [data]\n instances = [json_format.ParseDict(s, Value()) for s in instances_list]\n response = clients[\"prediction\"].predict(\n endpoint=endpoint, instances=instances, parameters=parameters\n )\n print(\"response\")\n print(\" deployed_model_id:\", response.deployed_model_id)\n predictions = response.predictions\n print(\"predictions\")\n for prediction in predictions:\n print(\" prediction:\", dict(prediction))\npredict_item(INSTANCE, endpoint_id, None)\nExplanation: Make a prediction\nNow you have a test item. Use this helper function predict_item, which takes the following parameters:\nfilename: The Cloud Storage path to the test item.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.\nparameters_dict: Additional filtering parameters for serving prediction results.\nThis function calls the prediction client service's predict method with the following parameters:\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.\ninstances: A list of instances (data items) to predict.\nparameters: Additional filtering parameters for serving prediction results. Note, tabular models do not support additional parameters.\nRequest\nThe format of each instance is, where values must be specified as a string:\n{ 'feature_1': 'value_1', 'feature_2': 'value_2', ... }\nSince the predict() method can take multiple items (instances), you send your single test item as a list of one test item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the predict() method.\nResponse\nThe response object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction -- in this case there is just one:\nconfidences: Confidence level in the prediction.\ndisplayNames: The predicted label.\nEnd of explanation\ndef undeploy_model(deployed_model_id, endpoint):\n response = clients[\"endpoint\"].undeploy_model(\n endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}\n )\n print(response)\nundeploy_model(deployed_model_id, endpoint_id)\nExplanation: Undeploy the Model resource\nNow undeploy your Model resource from the serving Endpoint resoure. Use this helper function undeploy_model, which takes the following parameters:\ndeployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed to.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model is deployed to.\nThis function calls the endpoint client service's method undeploy_model, with the following parameters:\ndeployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource is deployed.\ntraffic_split: How to split traffic among the remaining deployed models on the Endpoint resource.\nSince this is the only deployed model on the Endpoint resource, you simply can leave traffic_split empty by setting it to {}.\nEnd of explanation\ndelete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n# Delete the dataset using the Vertex fully qualified identifier for the dataset\ntry:\n if delete_dataset and \"dataset_id\" in globals():\n clients[\"dataset\"].delete_dataset(name=dataset_id)\nexcept Exception as e:\n print(e)\n# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline\ntry:\n if delete_pipeline and \"pipeline_id\" in globals():\n clients[\"pipeline\"].delete_training_pipeline(name=pipeline_id)\nexcept Exception as e:\n print(e)\n# Delete the model using the Vertex fully qualified identifier for the model\ntry:\n if delete_model and \"model_to_deploy_id\" in globals():\n clients[\"model\"].delete_model(name=model_to_deploy_id)\nexcept Exception as e:\n print(e)\n# Delete the endpoint using the Vertex fully qualified identifier for the endpoint\ntry:\n if delete_endpoint and \"endpoint_id\" in globals():\n clients[\"endpoint\"].delete_endpoint(name=endpoint_id)\nexcept Exception as e:\n print(e)\n# Delete the batch job using the Vertex fully qualified identifier for the batch job\ntry:\n if delete_batchjob and \"batch_job_id\" in globals():\n clients[\"job\"].delete_batch_prediction_job(name=batch_job_id)\nexcept Exception as e:\n print(e)\n# Delete the custom job using the Vertex fully qualified identifier for the custom job\ntry:\n if delete_customjob and \"job_id\" in globals():\n clients[\"job\"].delete_custom_job(name=job_id)\nexcept Exception as e:\n print(e)\n# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job\ntry:\n if delete_hptjob and \"hpt_job_id\" in globals():\n clients[\"job\"].delete_hyperparameter_tuning_job(name=hpt_job_id)\nexcept Exception as e:\n print(e)\nif delete_bucket and \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME\nExplanation: Cleaning up\nTo clean up all GCP resources used in this project, you can delete the GCP\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial:\nDataset\nPipeline\nModel\nEndpoint\nBatch Job\nCustom Job\nHyperparameter Tuning Job\nCloud Storage Bucket\nEnd of explanation"}}},{"rowIdx":2187,"cells":{"Unnamed: 0":{"kind":"number","value":2187,"string":"2,187"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n \"Open\nADVI from scratch in JAX\nAuthors: karm-patel@, murphyk@\nIn this notebook we apply ADVI (automatic differentiation variational inference) to the beta-binomial model, using a Normal Distribution as Variational Posterior. This involves a change of variable from the unconstrained z in R space to the constrained theta in [0,1] space.\nEnd of explanation\ndef prior_dist():\n return dist.Beta(concentration1=1.0, concentration0=1.0)\ndef likelihood_dist(theta):\n return dist.Bernoulli(probs=theta)\ndef transform_fn(x):\n return 1 / (1 + jnp.exp(-x)) # sigmoid\ndef positivity_fn(x):\n return jnp.log(1 + jnp.exp(x)) # softplus\ndef variational_distribution_q(params):\n loc = params[\"loc\"]\n scale = positivity_fn(params[\"scale\"]) # apply softplus\n return dist.Normal(loc, scale)\njacobian_fn = jax.jacfwd(transform_fn) # define function to find jacobian for tranform_fun\nExplanation: Functions\nHelper functions which will be used later\nEnd of explanation\n# preparing dataset\n# key = jax.random.PRNGKey(128)\n# n_samples = 12\n# theta_true = prior_dist().sample((5,),key)[0]\n# dataset = likelihood_dist(theta_true).sample(n_samples,key)\n# print(f\"Dataset: {dataset}\")\n# n_heads = dataset.sum()\n# n_tails = n_samples - n_heads\n# Use same data as https://github.com/probml/probml-notebooks/blob/main/notebooks/beta_binom_approx_post_pymc.ipynb\nkey = jax.random.PRNGKey(128)\ndataset = np.repeat([0, 1], (10, 1))\nn_samples = len(dataset)\nprint(f\"Dataset: {dataset}\")\nn_heads = dataset.sum()\nn_tails = n_samples - n_heads\nExplanation: Dataset\nNow, we will create the dataset. we sample theta_true (probability of occurring head) random variable from the prior distribution which is Beta in this case. Then we sample n_samples coin tosses from likelihood distribution which is Bernouli in this case.\nEnd of explanation\n# closed form of beta posterior\na = prior_dist().concentration1\nb = prior_dist().concentration0\nexact_posterior = dist.Beta(concentration1=a + n_heads, concentration0=b + n_tails)\ntheta_range = jnp.linspace(0.01, 0.99, 100)\nax = plt.gca()\nax2 = ax.twinx()\n(plt2,) = ax2.plot(theta_range, exact_posterior.prob(theta_range), \"g--\", label=\"True Posterior\")\n(plt3,) = ax2.plot(theta_range, prior_dist().prob(theta_range), label=\"Prior\")\nlikelihood = jax.vmap(lambda x: jnp.prod(likelihood_dist(x).prob(dataset)))(theta_range)\n(plt1,) = ax.plot(theta_range, likelihood, \"r-.\", label=\"Likelihood\")\nax.set_xlabel(\"theta\")\nax.set_ylabel(\"Likelihood\")\nax2.set_ylabel(\"Prior & Posterior\")\nax2.legend(handles=[plt1, plt2, plt3], bbox_to_anchor=(1.6, 1));\nExplanation: Prior, Likelihood, and True Posterior\nFor coin toss problem, since we know the closed form solution of posterior, we compare the distributions of Prior, Likelihood, and True Posterior below.\nEnd of explanation\ndef log_prior_likelihood_jacobian(normal_sample, dataset):\n theta = transform_fn(normal_sample) # transform normal sample to beta sample\n likelihood_log_prob = likelihood_dist(theta).log_prob(dataset).sum() # log probability of likelihood\n prior_log_prob = prior_dist().log_prob(theta) # log probability of prior\n log_det_jacob = jnp.log(\n jnp.abs(jnp.linalg.det(jacobian_fn(normal_sample).reshape(1, 1)))\n ) # log of determinant of jacobian\n return likelihood_log_prob + prior_log_prob + log_det_jacob\n# reference: https://code-first-ml.github.io/book2/notebooks/introduction/variational.html\ndef negative_elbo(params, dataset, n_samples=10, key=jax.random.PRNGKey(1)):\n q = variational_distribution_q(params) # Normal distribution.\n q_loc, q_scale = q.loc, q.scale\n std_normal = dist.Normal(0, 1)\n sample_set = std_normal.sample(\n seed=key,\n sample_shape=[\n n_samples,\n ],\n )\n sample_set = q_loc + q_scale * sample_set # reparameterization trick\n # calculate log joint for each sample of z\n p_log_prob = jax.vmap(log_prior_likelihood_jacobian, in_axes=(0, None))(sample_set, dataset)\n return jnp.mean(q.log_prob(sample_set) - p_log_prob)\nExplanation: Optimizing the ELBO\nIn order to minimize KL divergence between true posterior and variational distribution, we need to minimize the negative ELBO, as we describe below.\nWe start with the ELBO, which is given by:\n\\begin{align}\nELBO(\\psi) &= E_{z \\sim q(z|\\psi)} \\left[\n p(\\mathcal{D}|z) + \\log p(z) - \\log q(z|\\psi) \\right]\n\\end{align}\nwhere \n$\\psi = (\\mu, \\sigma)$ are the variational parameters,\n$p(\\mathcal{D}|z) = p(\\mathcal{D}|\\theta=\\sigma(z))$\nis the likelihood,\nand the prior is given by the change of variables formula:\n\\begin{align}\np(z) &= p(\\theta) | \\frac{\\partial \\theta}{\\partial z} |\n = p(\\theta) | J | \n\\end{align}\nwhere $J$ is the Jacobian of the $z \\rightarrow \\theta$ mapping.\nWe will use a Monte Carlo approximation of the expectation over $z$.\nWe also apply the reparameterization trick\nto replace $z \\sim q(z|\\psi)$ with\n\\begin{align}\n\\epsilon &\\sim \\mathcal{N}(0,1 ) \\\nz &= \\mu + \\sigma \\epsilon\n\\end{align}\nPutting it altogether our estimate for the negative ELBO (for a single sample of $\\epsilon$) is\n\\begin{align}\n-L(\\psi; z) &= -( \\log p(\\mathcal{D}|\\theta ) \n+\\log p( \\theta) + \\log|J_\\boldsymbol{\\sigma}(z)|) \n+ \\log q(z|\\psi)\n\\end{align}\nEnd of explanation\nloss_and_grad_fn = jax.value_and_grad(negative_elbo, argnums=(0))\nloss_and_grad_fn = jax.jit(loss_and_grad_fn) # jit the loss_and_grad function\nparams = {\"loc\": 0.0, \"scale\": 0.5}\nelbo, grads = loss_and_grad_fn(params, dataset)\nprint(f\"loss: {elbo}\")\nprint(f\"grads:\\n loc: {grads['loc']}\\n scale: {grads['scale']} \")\noptimizer = optax.adam(learning_rate=0.01)\nopt_state = optimizer.init(params)\n# jax scannable function for training\ndef train_step(carry, data_output):\n # take carry data\n key = carry[\"key\"]\n elbo = carry[\"elbo\"]\n grads = carry[\"grads\"]\n params = carry[\"params\"]\n opt_state = carry[\"opt_state\"]\n updates = carry[\"updates\"]\n # training\n key, subkey = jax.random.split(key)\n elbo, grads = loss_and_grad_fn(params, dataset, key=subkey)\n updates, opt_state = optimizer.update(grads, opt_state)\n params = optax.apply_updates(params, updates)\n # forward carry to next iteration by storing it\n carry = {\"key\": subkey, \"elbo\": elbo, \"grads\": grads, \"params\": params, \"opt_state\": opt_state, \"updates\": updates}\n output = {\"elbo\": elbo, \"params\": params}\n return carry, output\n%%time\n# dummy iteration to pass carry to jax scannale function train()\nkey, subkey = jax.random.split(key)\nelbo, grads = loss_and_grad_fn(params, dataset, key=subkey)\nupdates, opt_state = optimizer.update(grads, opt_state)\nparams = optax.apply_updates(params, updates)\ncarry = {\"key\": key, \"elbo\": elbo, \"grads\": grads, \"params\": params, \"opt_state\": opt_state, \"updates\": updates}\nnum_iter = 1000\nelbos = np.empty(num_iter)\n# apply scan() to optimize training loop\nlast_carry, output = lax.scan(train_step, carry, elbos)\nelbo = output[\"elbo\"]\nparams = output[\"params\"]\noptimized_params = last_carry[\"params\"]\nprint(params[\"loc\"].shape)\nprint(params[\"scale\"].shape)\nExplanation: We now apply stochastic gradient descent to minimize negative ELBO and optimize the variational parameters (loc and scale)\nEnd of explanation\nplt.plot(elbo)\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"Negative ELBO\")\nsns.despine()\nplt.savefig(\"advi_beta_binom_jax_loss.pdf\")\nExplanation: We now plot the ELBO\nEnd of explanation\nq_learned = variational_distribution_q(optimized_params)\nkey = jax.random.PRNGKey(128)\nq_learned_samples = q_learned.sample(1000, seed=key) # q(z|D)\ntransformed_samples = transform_fn(q_learned_samples) # transform Normal samples into Beta samples\ntheta_range = jnp.linspace(0.01, 0.99, 100)\nplt.plot(theta_range, exact_posterior.prob(theta_range), \"r\", label=\"$p(x)$: true posterior\")\nsns.kdeplot(transformed_samples, color=\"blue\", label=\"$q(x)$: learned\", bw_adjust=1.5, clip=(0.0, 1.0), linestyle=\"--\")\nplt.xlabel(\"theta\")\nplt.legend() # bbox_to_anchor=(1.5, 1));\nsns.despine()\nplt.savefig(\"advi_beta_binom_jax_posterior.pdf\")\nExplanation: We can see that after 200 iterations ELBO is optimized and not changing too much.\nSamples using Optimized parameters\nNow, we take 1000 samples from variational distribution (Normal) and transform them into true posterior distribution (Beta) by applying tranform_fn (sigmoid) on samples. Then we compare density of samples with exact posterior.\nEnd of explanation\n# print(transformed_samples)\nprint(len(transformed_samples))\nprint(jnp.sum(transformed_samples < 0)) # all samples of thetas should be in [0,1]\nprint(jnp.sum(transformed_samples > 1)) # all samples of thetas should be in [0,1]\nprint(q_learned)\nprint(q_learned.mean())\nprint(jnp.sqrt(q_learned.variance()))\nlocs, scales = params[\"loc\"], params[\"scale\"]\nsigmas = positivity_fn(jnp.array(scales))\nplt.plot(locs, label=\"mu\")\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"$E_q[z]$\")\nplt.legend()\nsns.despine()\nplt.savefig(\"advi_beta_binom_jax_post_mu_vs_time.pdf\")\nplt.show()\nplt.plot(sigmas, label=\"sigma\")\nplt.xlabel(\"Iterations\")\n# plt.ylabel(r'$\\sqrt{\\text{var}(z)}')\nplt.ylabel(\"$std_{q}[z]$\")\nplt.legend()\nsns.despine()\nplt.savefig(\"advi_beta_binom_jax_post_sigma_vs_time.pdf\")\nplt.show()\nExplanation: We can see that the learned q(x) is a reasonably good approximation to the true posterior. It seems to have support over negative theta but this is an artefact of KDE.\nEnd of explanation\ntry:\n import pymc3 as pm\nexcept ModuleNotFoundError:\n %pip install -qq pymc3\n import pymc3 as pm\ntry:\n import scipy.stats as stats\nexcept ModuleNotFoundError:\n %pip install -qq scipy\n import scipy.stats as stats\nimport scipy.special as sp\ntry:\n import arviz as az\nexcept ModuleNotFoundError:\n %pip install -qq arviz\n import arviz as az\nimport math\na = prior_dist().concentration1\nb = prior_dist().concentration0\nwith pm.Model() as mf_model:\n theta = pm.Beta(\"theta\", a, b)\n y = pm.Binomial(\"y\", n=1, p=theta, observed=dataset) # Bernoulli\n advi = pm.ADVI()\n tracker = pm.callbacks.Tracker(\n mean=advi.approx.mean.eval, # callable that returns mean\n std=advi.approx.std.eval, # callable that returns std\n )\n approx = advi.fit(callbacks=[tracker], n=20000)\ntrace_approx = approx.sample(1000)\nthetas = trace_approx[\"theta\"]\nplt.plot(advi.hist, label=\"ELBO\")\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"ELBO\")\nplt.legend()\nsns.despine()\nplt.savefig(\"advi_beta_binom_pymc_loss.pdf\")\nplt.show()\nprint(f\"ELBO comparison for last 1% iterations:\\nJAX ELBO: {elbo[-10:].mean()}\\nPymc ELBO: {advi.hist[-100:].mean()}\")\nExplanation: Comparison with pymc.ADVI()\nNow, we compare our implementation with pymc's ADVI implementation. \nNote: For pymc implementation, the code is taken from this notebook: https://github.com/probml/probml-notebooks/blob/main/notebooks/beta_binom_approx_post_pymc.ipynb\nEnd of explanation\nplt.plot(theta_range, exact_posterior.prob(theta_range), \"b--\", label=\"$p(x)$: True Posterior\")\nsns.kdeplot(transformed_samples, color=\"red\", label=\"$q(x)$: learnt - jax\", clip=(0.0, 1.0), bw_adjust=1.5)\nsns.kdeplot(thetas, label=\"$q(x)$: learnt - pymc\", clip=(0.0, 1.0), bw_adjust=1.5)\nplt.xlabel(\"theta\")\nplt.legend(bbox_to_anchor=(1.3, 1))\nsns.despine()\nExplanation: True posterior, JAX q(x), and pymc q(x)\nEnd of explanation\nfig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharey=True)\nlocs, scales = params[\"loc\"], params[\"scale\"]\n# plot loc\n# JAX\nax1.plot(locs, label=\"JAX: loc\")\nax1.set_ylabel(\"loc\")\nax1.legend()\n# pymc\nax2.plot(tracker[\"mean\"], label=\"Pymc: loc\")\nax2.legend()\nsns.despine()\n# plot scale\nfig2, (ax3, ax4) = plt.subplots(1, 2, figsize=(10, 4), sharey=True)\n# JAX\nax3.plot(positivity_fn(jnp.array(scales)), label=\"JAX: scale\")\n# apply softplus on scale\nax3.set_xlabel(\"Iterations\")\nax3.set_ylabel(\"scale\")\nax3.legend()\n# pymc\nax4.plot(tracker[\"std\"], label=\"Pymc: scale\")\nax4.set_xlabel(\"Iterations\")\nax4.legend()\nsns.despine();\nExplanation: Plot of loc and scale for variational distribution\nEnd of explanation"}}},{"rowIdx":2188,"cells":{"Unnamed: 0":{"kind":"number","value":2188,"string":"2,188"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Introduction to Survival Analysis with scikit-survival\nscikit-survival is a Python module for survival analysis built on top of scikit-learn. It allows doing survival analysis while utilizing the power of scikit-learn, e.g., for pre-processing or doing cross-validation.\nTable of Contents\nWhat is Survival Analysis?\nThe Veterans' Administration Lung Cancer Trial\nSurvival Data\nThe Survival Function\nConsidering other variables by stratification\nMultivariate Survival Models\nMeasuring the Performance of Survival Models\nFeature Selection\nStep1: We can easily see that only a few survival times are right-censored (Status is False), i.e., most veteran's died during the study period (Status is True).\nThe Survival Function\nA key quantity in survival analysis is the so-called survival function, which relates time to the probability of surviving beyond a given time point.\nLet $T$ denote a continuous non-negative random variable corresponding to a patient’s survival time. The survival function $S(t)$ returns the probability of survival beyond time $t$ and is defined as\n$$ S(t) = P (T > t). $$\nIf we observed the exact survival time of all subjects, i.e., everyone died before the study ended, the survival function at time $t$ can simply be estimated by the ratio of patients surviving beyond time $t$ and the total number of patients\nStep2: Using the formula from above, we can compute $\\hat{S}(t=11) = \\frac{3}{5}$, but not $\\hat{S}(t=30)$, because we don't know whether the 4th patient is still alive at $t = 30$, all we know is that when we last checked at $t = 25$, the patient was still alive.\nAn estimator, similar to the one above, that is valid if survival times are right-censored is the Kaplan-Meier estimator.\nStep3: The estimated curve is a step function, with steps occurring at time points where one or more patients died. From the plot we can see that most patients died in the first 200 days, as indicated by the steep slope of the estimated survival function in the first 200 days.\nConsidering other variables by stratification\nSurvival functions by treatment\nPatients enrolled in the Veterans' Administration Lung Cancer Trial were randomized to one of two treatments\nStep4: Roughly half the patients received the alternative treatment.\nThe obvious questions to ask is\nStep5: Unfortunately, the results are inconclusive, because the difference between the two estimated survival functions is too small to confidently argue that the drug affects survival or not.\nSidenote\nStep6: In this case, we observe a pronounced difference between two groups. Patients with squamous or large cells seem to have a better prognosis compared to patients with small or adeno cells.\nMultivariate Survival Models\nIn the Kaplan-Meier approach used above, we estimated multiple survival curves by dividing the dataset into smaller sub-groups according to a variable. If we want to consider more than 1 or 2 variables, this approach quickly becomes infeasible, because subgroups will get very small. Instead, we can use a linear model, Cox's proportional hazard's model, to estimate the impact each variable has on survival.\nFirst however, we need to convert the categorical variables in the data set into numeric values.\nStep7: Survival models in scikit-survival follow the same rules as estimators in scikit-learn, i.e., they have a fit method, which expects a data matrix and a structured array of survival times and binary event indicators.\nStep8: The result is a vector of coefficients, one for each variable, where each value corresponds to the log hazard ratio.\nStep9: Using the fitted model, we can predict a patient-specific survival function, by passing an appropriate data matrix to the estimator's predict_survival_function method.\nFirst, let's create a set of four synthetic patients.\nStep10: Similar to kaplan_meier_estimator, the predict_survival_function method returns a sequence of step functions, which we can plot.\nStep11: Measuring the Performance of Survival Models\nOnce we fit a survival model, we usually want to assess how well a model can actually predict survival. Our test data is usually subject to censoring too, therefore metrics like root mean squared error or correlation are unsuitable. Instead, we use generalization of the area under the receiver operating characteristic (ROC) curve called Harrell's concordance index or c-index.\nThe interpretation is identical to the traditional area under the ROC curve metric for binary classification\nStep12: or alternatively\nStep13: Our model's c-index indicates that the model clearly performs better than random, but is also far from perfect.\nFeature Selection\nStep14: Karnofsky_score is the best variable, whereas Months_from_Diagnosis and Prior_therapy='yes' have almost no predictive power on their own.\nNext, we want to build a parsimonious model by excluding irrelevant features. We could use the ranking from above, but would need to determine what the optimal cut-off should be. Luckily, scikit-learn has built-in support for performing grid search.\nFirst, we create a pipeline that puts all the parts together.\nStep15: Next, we need to define the range of parameters we want to explore during grid search. Here, we want to optimize the parameter k of the SelectKBest class and allow k to vary from 1 feature to all 8 features.\nStep16: The results show that it is sufficient to select the 3 most predictive features."},"code_prompt":{"kind":"string","value":"Python Code:\nfrom sksurv.datasets import load_veterans_lung_cancer\ndata_x, data_y = load_veterans_lung_cancer()\ndata_y\nExplanation: Introduction to Survival Analysis with scikit-survival\nscikit-survival is a Python module for survival analysis built on top of scikit-learn. It allows doing survival analysis while utilizing the power of scikit-learn, e.g., for pre-processing or doing cross-validation.\nTable of Contents\nWhat is Survival Analysis?\nThe Veterans' Administration Lung Cancer Trial\nSurvival Data\nThe Survival Function\nConsidering other variables by stratification\nMultivariate Survival Models\nMeasuring the Performance of Survival Models\nFeature Selection: Which Variable is Most Predictive?\nWhat's next?\nWhat is Survival Analysis?\nThe objective in survival analysis — also referred to as reliability analysis in engineering — is to establish a connection between covariates and the time of an event. The name survival analysis originates from clinical research, where predicting the time to death, i.e., survival, is often the main objective. Survival analysis is a type of regression problem (one wants to predict a continuous value), but with a twist. It differs from traditional regression by the fact that parts of the training data can only be partially observed – they are censored.\nAs an example, consider a clinical study, which investigates coronary heart disease and has been carried out over a 1 year period as in the figure below.\nPatient A was lost to follow-up after three months with no recorded cardiovascular event, patient B experienced an event four and a half months after enrollment, patient D withdrew from the study two months after enrollment, and patient E did not experience any event before the study ended. Consequently, the exact time of a cardiovascular event could only be recorded for patients B and C; their records are uncensored. For the remaining patients it is unknown whether they did or did not experience an event after termination of the study. The only valid information that is available for patients A, D, and E is that they were event-free up to their last follow-up. Therefore, their records are censored.\nFormally, each patient record consists of a set of covariates $x \\in \\mathbb{R}^d$ , and the time $t>0$ when an event occurred or the time $c>0$ of censoring. Since censoring and experiencing and event are mutually exclusive, it is common to define an event indicator $\\delta \\in {0;1}$ and the observable survival time $y>0$. The observable time $y$ of a right censored sample is defined as\n$$\ny = \\min(t, c) = \n\\begin{cases} \nt & \\text{if } \\delta = 1 , \\ \nc & \\text{if } \\delta = 0 .\n\\end{cases}\n$$\nConsequently, survival analysis demands for models that take this unique characteristic of such a dataset into account, some of which are showcased below.\nThe Veterans' Administration Lung Cancer Trial\nThe Veterans' Administration Lung Cancer Trial is a randomized trial of two treatment regimens for lung cancer. The data set (Kalbfleisch J. and Prentice R, (1980) The Statistical Analysis of Failure Time Data. New York: Wiley) consists of 137 patients and 8 variables, which are described below:\nTreatment: denotes the type of lung cancer treatment; standard and test drug.\nCelltype: denotes the type of cell involved; squamous, small cell, adeno, large.\nKarnofsky_score: is the Karnofsky score.\nDiag: is the time since diagnosis in months.\nAge: is the age in years.\nPrior_Therapy: denotes any prior therapy; none or yes.\nStatus: denotes the status of the patient as dead or alive; dead or alive.\nSurvival_in_days: is the survival time in days since the treatment.\nOur primary interest is studying whether there are subgroups that differ in survival and whether we can predict survival times.\nSurvival Data\nAs described in the section What is Survival Analysis? above, survival times are subject to right-censoring, therefore, we need to consider an individual's status in addition to survival time. To be fully compatible with scikit-learn, Status and Survival_in_days need to be stored as a structured array with the first field indicating whether the actual survival time was observed or if was censored, and the second field denoting the observed survival time, which corresponds to the time of death (if Status == 'dead', $\\delta = 1$) or the last time that person was contacted (if Status == 'alive', $\\delta = 0$).\nEnd of explanation\nimport pandas as pd\npd.DataFrame.from_records(data_y[[11, 5, 32, 13, 23]], index=range(1, 6))\nExplanation: We can easily see that only a few survival times are right-censored (Status is False), i.e., most veteran's died during the study period (Status is True).\nThe Survival Function\nA key quantity in survival analysis is the so-called survival function, which relates time to the probability of surviving beyond a given time point.\nLet $T$ denote a continuous non-negative random variable corresponding to a patient’s survival time. The survival function $S(t)$ returns the probability of survival beyond time $t$ and is defined as\n$$ S(t) = P (T > t). $$\nIf we observed the exact survival time of all subjects, i.e., everyone died before the study ended, the survival function at time $t$ can simply be estimated by the ratio of patients surviving beyond time $t$ and the total number of patients:\n$$\n\\hat{S}(t) = \\frac{ \\text{number of patients surviving beyond $t$} }{ \\text{total number of patients} }\n$$\nIn the presence of censoring, this estimator cannot be used, because the numerator is not always defined. For instance, consider the following set of patients:\nEnd of explanation\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sksurv.nonparametric import kaplan_meier_estimator\ntime, survival_prob = kaplan_meier_estimator(data_y[\"Status\"], data_y[\"Survival_in_days\"])\nplt.step(time, survival_prob, where=\"post\")\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nExplanation: Using the formula from above, we can compute $\\hat{S}(t=11) = \\frac{3}{5}$, but not $\\hat{S}(t=30)$, because we don't know whether the 4th patient is still alive at $t = 30$, all we know is that when we last checked at $t = 25$, the patient was still alive.\nAn estimator, similar to the one above, that is valid if survival times are right-censored is the Kaplan-Meier estimator.\nEnd of explanation\ndata_x[\"Treatment\"].value_counts()\nExplanation: The estimated curve is a step function, with steps occurring at time points where one or more patients died. From the plot we can see that most patients died in the first 200 days, as indicated by the steep slope of the estimated survival function in the first 200 days.\nConsidering other variables by stratification\nSurvival functions by treatment\nPatients enrolled in the Veterans' Administration Lung Cancer Trial were randomized to one of two treatments: standard and a new test drug. Next, let's have a look at how many patients underwent the standard treatment and how many received the new drug.\nEnd of explanation\nfor treatment_type in (\"standard\", \"test\"):\n mask_treat = data_x[\"Treatment\"] == treatment_type\n time_treatment, survival_prob_treatment = kaplan_meier_estimator(\n data_y[\"Status\"][mask_treat],\n data_y[\"Survival_in_days\"][mask_treat])\n \n plt.step(time_treatment, survival_prob_treatment, where=\"post\",\n label=\"Treatment = %s\" % treatment_type)\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"best\")\nExplanation: Roughly half the patients received the alternative treatment.\nThe obvious questions to ask is:\nIs there any difference in survival between the two treatment groups?\nAs a first attempt, we can estimate the survival function in both treatment groups separately.\nEnd of explanation\nfor value in data_x[\"Celltype\"].unique():\n mask = data_x[\"Celltype\"] == value\n time_cell, survival_prob_cell = kaplan_meier_estimator(data_y[\"Status\"][mask],\n data_y[\"Survival_in_days\"][mask])\n plt.step(time_cell, survival_prob_cell, where=\"post\",\n label=\"%s (n = %d)\" % (value, mask.sum()))\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"best\")\nExplanation: Unfortunately, the results are inconclusive, because the difference between the two estimated survival functions is too small to confidently argue that the drug affects survival or not.\nSidenote: Visually comparing estimated survival curves in order to assess whether there is a difference in survival between groups is usually not recommended, because it is highly subjective. Statistical tests such as the log-rank test are usually more appropriate.\nSurvival functions by cell type\nNext, let's have a look at the cell type, which has been recorded as well, and repeat the analysis from above.\nEnd of explanation\nfrom sksurv.preprocessing import OneHotEncoder\ndata_x_numeric = OneHotEncoder().fit_transform(data_x)\ndata_x_numeric.head()\nExplanation: In this case, we observe a pronounced difference between two groups. Patients with squamous or large cells seem to have a better prognosis compared to patients with small or adeno cells.\nMultivariate Survival Models\nIn the Kaplan-Meier approach used above, we estimated multiple survival curves by dividing the dataset into smaller sub-groups according to a variable. If we want to consider more than 1 or 2 variables, this approach quickly becomes infeasible, because subgroups will get very small. Instead, we can use a linear model, Cox's proportional hazard's model, to estimate the impact each variable has on survival.\nFirst however, we need to convert the categorical variables in the data set into numeric values.\nEnd of explanation\nfrom sksurv.linear_model import CoxPHSurvivalAnalysis\nestimator = CoxPHSurvivalAnalysis()\nestimator.fit(data_x_numeric, data_y)\nExplanation: Survival models in scikit-survival follow the same rules as estimators in scikit-learn, i.e., they have a fit method, which expects a data matrix and a structured array of survival times and binary event indicators.\nEnd of explanation\npd.Series(estimator.coef_, index=data_x_numeric.columns)\nExplanation: The result is a vector of coefficients, one for each variable, where each value corresponds to the log hazard ratio.\nEnd of explanation\nx_new = pd.DataFrame.from_dict({\n 1: [65, 0, 0, 1, 60, 1, 0, 1],\n 2: [65, 0, 0, 1, 60, 1, 0, 0],\n 3: [65, 0, 1, 0, 60, 1, 0, 0],\n 4: [65, 0, 1, 0, 60, 1, 0, 1]},\n columns=data_x_numeric.columns, orient='index')\nx_new\nExplanation: Using the fitted model, we can predict a patient-specific survival function, by passing an appropriate data matrix to the estimator's predict_survival_function method.\nFirst, let's create a set of four synthetic patients.\nEnd of explanation\nimport numpy as np\npred_surv = estimator.predict_survival_function(x_new)\ntime_points = np.arange(1, 1000)\nfor i, surv_func in enumerate(pred_surv):\n plt.step(time_points, surv_func(time_points), where=\"post\",\n label=\"Sample %d\" % (i + 1))\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"best\")\nExplanation: Similar to kaplan_meier_estimator, the predict_survival_function method returns a sequence of step functions, which we can plot.\nEnd of explanation\nfrom sksurv.metrics import concordance_index_censored\nprediction = estimator.predict(data_x_numeric)\nresult = concordance_index_censored(data_y[\"Status\"], data_y[\"Survival_in_days\"], prediction)\nresult[0]\nExplanation: Measuring the Performance of Survival Models\nOnce we fit a survival model, we usually want to assess how well a model can actually predict survival. Our test data is usually subject to censoring too, therefore metrics like root mean squared error or correlation are unsuitable. Instead, we use generalization of the area under the receiver operating characteristic (ROC) curve called Harrell's concordance index or c-index.\nThe interpretation is identical to the traditional area under the ROC curve metric for binary classification:\n- a value of 0.5 denotes a random model,\n- a value of 1.0 denotes a perfect model,\n- a value of 0.0 denotes a perfectly wrong model.\nEnd of explanation\nestimator.score(data_x_numeric, data_y)\nExplanation: or alternatively\nEnd of explanation\nimport numpy as np\ndef fit_and_score_features(X, y):\n n_features = X.shape[1]\n scores = np.empty(n_features)\n m = CoxPHSurvivalAnalysis()\n for j in range(n_features):\n Xj = X[:, j:j+1]\n m.fit(Xj, y)\n scores[j] = m.score(Xj, y)\n return scores\nscores = fit_and_score_features(data_x_numeric.values, data_y)\npd.Series(scores, index=data_x_numeric.columns).sort_values(ascending=False)\nExplanation: Our model's c-index indicates that the model clearly performs better than random, but is also far from perfect.\nFeature Selection: Which Variable is Most Predictive?\nThe model above considered all available variables for prediction. Next, we want to investigate which single variable is the best risk predictor. Therefore, we fit a Cox model to each variable individually and record the c-index on the training set.\nEnd of explanation\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.pipeline import Pipeline\npipe = Pipeline([('encode', OneHotEncoder()),\n ('select', SelectKBest(fit_and_score_features, k=3)),\n ('model', CoxPHSurvivalAnalysis())])\nExplanation: Karnofsky_score is the best variable, whereas Months_from_Diagnosis and Prior_therapy='yes' have almost no predictive power on their own.\nNext, we want to build a parsimonious model by excluding irrelevant features. We could use the ranking from above, but would need to determine what the optimal cut-off should be. Luckily, scikit-learn has built-in support for performing grid search.\nFirst, we create a pipeline that puts all the parts together.\nEnd of explanation\nfrom sklearn.model_selection import GridSearchCV, KFold\nparam_grid = {'select__k': np.arange(1, data_x_numeric.shape[1] + 1)}\ncv = KFold(n_splits=3, random_state=1, shuffle=True)\ngcv = GridSearchCV(pipe, param_grid, return_train_score=True, cv=cv)\ngcv.fit(data_x, data_y)\nresults = pd.DataFrame(gcv.cv_results_).sort_values(by='mean_test_score', ascending=False)\nresults.loc[:, ~results.columns.str.endswith(\"_time\")]\nExplanation: Next, we need to define the range of parameters we want to explore during grid search. Here, we want to optimize the parameter k of the SelectKBest class and allow k to vary from 1 feature to all 8 features.\nEnd of explanation\npipe.set_params(**gcv.best_params_)\npipe.fit(data_x, data_y)\nencoder, transformer, final_estimator = [s[1] for s in pipe.steps]\npd.Series(final_estimator.coef_, index=encoder.encoded_columns_[transformer.get_support()])\nExplanation: The results show that it is sufficient to select the 3 most predictive features.\nEnd of explanation"}}},{"rowIdx":2189,"cells":{"Unnamed: 0":{"kind":"number","value":2189,"string":"2,189"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Data Science Academy - Python Fundamentos - Capítulo 2\nDownload\nStep1: Variáveis e Operadores\nStep2: Declaração Múltipla\nStep3: Pode-se usar letras, números e underline (mas não se pode começar com números)\nStep4: Não se pode usar palavras reservadas como nome de variável\nFalse\nclass\nfinally\nis\nreturn\nNone\ncontinue\nfor\nlambda\ntry\nTrue\ndef\nfrom\nnonlocal\nwhile\nand\ndel\nglobal\nnot\nwith\nas\nelif\nif\nor\nyield\nassert\nelse\nimport\npass\nbreak\nexcept\nin\nraise\nStep5: Variáveis atribuídas a outras variáveis e ordem dos operadores\nStep6: Operações com variáveis\nStep7: Concatenação de Variáveis"},"code_prompt":{"kind":"string","value":"Python Code:\n# Versão da Linguagem Python\nfrom platform import python_version\nprint('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())\nExplanation: Data Science Academy - Python Fundamentos - Capítulo 2\nDownload: http://github.com/dsacademybr\nEnd of explanation\n# Atribuindo o valor 1 à variável var_teste\nvar_teste = 1\n# Imprimindo o valor da variável\nvar_teste\n# Imprimindo o valor da variável\nprint(var_teste)\n# Não podemos utilizar uma variável que não foi definida. Veja a mensagem de erro.\nmy_var\nvar_teste = 2\nvar_teste\ntype(var_teste)\nvar_teste = 9.5\ntype(var_teste)\nx = 1\nx\nExplanation: Variáveis e Operadores\nEnd of explanation\npessoa1, pessoa2, pessoa3 = \"Maria\", \"José\", \"Tobias\"\npessoa1\npessoa2\npessoa3\nfruta1 = fruta2 = fruta3 = \"Laranja\"\nfruta1\nfruta2\n# Fique atento!!! Python é case-sensitive. Criamos a variável fruta2, mas não a variável Fruta2.\n# Letras maiúsculas e minúsculas tem diferença no nome da variável.\nFruta2\nExplanation: Declaração Múltipla\nEnd of explanation\nx1 = 50\nx1\n# Mensagem de erro, pois o Python não permite nomes de variáveis que iniciem com números\n1x = 50\nExplanation: Pode-se usar letras, números e underline (mas não se pode começar com números)\nEnd of explanation\n# Não podemos usar palavras reservadas como nome de variável\nbreak = 1\nExplanation: Não se pode usar palavras reservadas como nome de variável\nFalse\nclass\nfinally\nis\nreturn\nNone\ncontinue\nfor\nlambda\ntry\nTrue\ndef\nfrom\nnonlocal\nwhile\nand\ndel\nglobal\nnot\nwith\nas\nelif\nif\nor\nyield\nassert\nelse\nimport\npass\nbreak\nexcept\nin\nraise\nEnd of explanation\nlargura = 2\naltura = 4\narea = largura * altura\narea\nperimetro = 2 * largura + 2 * altura\nperimetro\n# A ordem dos operadores é a mesma seguida na Matemática\nperimetro = 2 * (largura + 2) * altura\nperimetro\nExplanation: Variáveis atribuídas a outras variáveis e ordem dos operadores\nEnd of explanation\nidade1 = 25\nidade2 = 35\nidade1 + idade2\nidade2 - idade1\nidade2 * idade1\nidade2 / idade1\nidade2 % idade1\nExplanation: Operações com variáveis\nEnd of explanation\nnome = \"Steve\"\nsobrenome = \"Jobs\"\nfullName = nome + \" \" + sobrenome\nfullName\nExplanation: Concatenação de Variáveis\nEnd of explanation"}}},{"rowIdx":2190,"cells":{"Unnamed: 0":{"kind":"number","value":2190,"string":"2,190"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n EDP Elípticas con Diferencias Finitas\nRecordemos que una ecuación diferencial parcial o EDP (PDE en inglés) es una ecuación que involucra funciones en dos o más variables y sus derivadas parciales. En este caso estudiamos las ecuaciones de tipo\n$$Au_{xx} + Bu_{xy} + Cu_{yy} + f(u_x, u_y, u, x, y) = 0$$\ndonde $A$, $B$ y $C$ son escalares y además $x,y$ son las variables independientes.\nEl discriminante de estas ecuaciones $B^2 -4AC$ nos indicará si una ecuación es parabólica, elíptica o hiperbólica. Las ecuaciones que nos interesan ahora son elípticas, por lo tanto cumplen con que $B^2-4AC < 0$.\nLas ecuaciones elípticas, a diferencia de los otros tipos, poseen condiciones de borde $\\partial R$ para ambas variables independientes.\nDefiniciones\nStep1: Para simplificar el sistema a resolver, cambiaremos los índices dobles por indices lineales mediante la conversión\n$$v_{i+(j-1)m} = w_{ij}$$\nSe puede pensar también como una operación de stack donde las filas de la grilla son colocadas una al lado de otra en orden.\nStep2: Luego debemos construir una matriz $A$ y un vector $b$ bajo esta nueva numeración tal que el sistema $Av=b$ sea resoluble y el resultado podamos trasladarlo de vuelta al sistema de $w_{ij}$. Esta matriz naturalmente será de tamaño $mn \\times mn$ y cada punto de la grilla tendrá su propia ecuación, como uno podría pensar.\nLa entrada $A_{pq}$ corresponde al $q$-ésimo coeficiente lineal de la $p$-ésima ecuación del sistema $Av =b$. Por ejemplo la ecuación\n$$\\frac{w_{i-1,j} -2w_{ij} + w_{i+1,j}}{h^2} + \\frac{w_{i,j-1}-2w_{ij}+ w_{i,j+1}}{k^2} = f(x_i, y_j)$$\nCorresponde a la ecuación para el punto $(i,j)$ de la grilla, y será la ecuación $p = i + (j-1)m$. Entonces, la entrada $A_{pq}$ no es más que, dada la conversión definida"},"code_prompt":{"kind":"string","value":"Python Code:\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.mlab import griddata\nm, n = 10, 4\nxl, xr = (0.0, 1.0)\nyb, yt = (0.0, 1.0)\nh = (xr - xl) / (m - 1.0)\nk = (yt - yb) / (n - 1.0)\nxx = [xl + (i - 1)*h for i in range(1, m+1)]\nyy = [yb + (i - 1)*k for i in range(1, n+1)]\nplt.figure(figsize=(10, 5))\nfor y in yy:\n plt.plot(xx, [y for _x in xx], 'co')\nplt.xlim(xl-0.1, xr+0.1)\nplt.ylim(yb-0.1, yt+0.1)\nplt.xticks([xl, xr], ['$x_l$', '$x_r$'], fontsize=20)\nplt.yticks([yb, yt], ['$y_b$', '$y_t$'], fontsize=20)\nplt.text(xl, yb, \"$w_{11}$\", fontsize=20)\nplt.text(xl+h, yb, \"$w_{21}$\", fontsize=20)\nplt.text(xl, yb+k, \"$w_{12}$\", fontsize=20)\nplt.text(xl, yt, \"$w_{1n}$\", fontsize=20)\nplt.text(xr, yt, \"$w_{mn}$\", fontsize=20)\nplt.text(xr, yb, \"$w_{m1}$\", fontsize=20)\nplt.title(\"Mesh para coordenadas en dos dimensiones\")\nplt.show()\nExplanation: EDP Elípticas con Diferencias Finitas\nRecordemos que una ecuación diferencial parcial o EDP (PDE en inglés) es una ecuación que involucra funciones en dos o más variables y sus derivadas parciales. En este caso estudiamos las ecuaciones de tipo\n$$Au_{xx} + Bu_{xy} + Cu_{yy} + f(u_x, u_y, u, x, y) = 0$$\ndonde $A$, $B$ y $C$ son escalares y además $x,y$ son las variables independientes.\nEl discriminante de estas ecuaciones $B^2 -4AC$ nos indicará si una ecuación es parabólica, elíptica o hiperbólica. Las ecuaciones que nos interesan ahora son elípticas, por lo tanto cumplen con que $B^2-4AC < 0$.\nLas ecuaciones elípticas, a diferencia de los otros tipos, poseen condiciones de borde $\\partial R$ para ambas variables independientes.\nDefiniciones:\nNunca deben olvidar la fórmula del laplaciano de una función! Es simplemente la suma de las segundas derivadas respecto a una misma variable.\n$$\\mathcal{L}(u) = \\Delta u = u_{xx} + u_{yy}$$\nLa ecuación $\\Delta u = f(x,y)$ es conocida como Ecuación de Poisson. En particular cuando $f(x,y) = 0$ la ecuación se conoce como Ecuación de Laplace. (Notar cómo la definición de las ecuaciones es consistente con que sean elípticas...)\nExisten dos tipos de condiciones de borde. Se puede imponer una condición en el borde tanto para $u$ (condiciones de Dirichlet) como para alguna derivada direccional $\\partial u / \\partial n$ (Condiciones de Neumann)\nAplicación del Método\nComo siempre declaramos el problema: Resolveremos la ecuación $\\Delta u = f$ en un rectángulo dado $[x_l, x_r] \\times [y_b, y_t]$. Además consideremos las condiciones de borde de Dirichlet que definen alguna función $g$ para cada borde, digamos:\n\\begin{align}\nu(x,y_b) = g_1(x)\\\nu(x,y_t) = g_2(x)\\\nu(x_l,y) = g_3(y)\\\nu(x_r,y) = g_4(y)\n\\end{align}\nAhora debemos discretizar el dominio bidimensional. Para $m$ puntos en el eje horizontal y $n$ en el vertical, es decir con $M = m-1$ y $N = n-1$ steps de tamaño $h = (x_r − x_l)/M$ and $k=(y_t − y_ b)/N$. Si reemplazamos las diferencias finitas centradas que estudiamos anteriormente en la ecuación de Poisson obtenemos:\n$$\\frac{u(x-h,y) -2u(x,y) + u(x+h,y)}{h^2} + \\mathcal{O}(h^2) + \\frac{u(x,y-k) -2u(x,y) + u(x,y+k)}{k^2} + \\mathcal{O}(k^2) = f(x,y)$$\nTrasladando esto a las soluciones aproximadas $w$ obtenemos\n$$\\frac{w_{i-1,j} -2w_{ij} + w_{i+1,j}}{h^2} + \\frac{w_{i,j-1}-2w_{ij}+ w_{i,j+1}}{k^2} = f(x_i, y_j)$$\nDonde $x_i = x_l + (i − 1)h$ y $y_j = y_b + (j − 1)k$\nLas incógnitas a resolver, al estar situadas en dos dimensiones, son incómodas de abordar, por lo que simplemente indexaremos de forma lineal las aproxmaciones $w_{ij}$\nEnd of explanation\nplt.figure(figsize=(10,5))\nplt.title(\"Mesh para coordenadas lineales\")\nfor y in yy:\n plt.plot(xx, [y for _x in xx], 'co')\nplt.xlim(xl-0.1, xr+0.1)\nplt.ylim(yb-0.1, yt+0.1)\nplt.xticks([xl, xr], ['$x_l$', '$x_r$'], fontsize=20)\nplt.yticks([yb, yt], ['$y_b$', '$y_t$'], fontsize=20)\nplt.text(xl, yb, \"$v_{1}$\", fontsize=20)\nplt.text(xl+h, yb, \"$v_{2}$\", fontsize=20)\nplt.text(xl, yb+k, \"$v_{m+1}$\", fontsize=20)\nplt.text(xr, yb+k, \"$v_{2m}$\", fontsize=20)\nplt.text(xl, yt, \"$v_{(n-1)m+1}$\", fontsize=20)\nplt.text(xr, yt, \"$v_{mn}$\", fontsize=20)\nplt.text(xr, yb, \"$v_{m}$\", fontsize=20)\nplt.title(\"Mesh para coordenadas en dos dimensiones\")\nplt.show()\nplt.show()\nExplanation: Para simplificar el sistema a resolver, cambiaremos los índices dobles por indices lineales mediante la conversión\n$$v_{i+(j-1)m} = w_{ij}$$\nSe puede pensar también como una operación de stack donde las filas de la grilla son colocadas una al lado de otra en orden.\nEnd of explanation\n%matplotlib notebook\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg\nfrom mpl_toolkits.mplot3d import Axes3D\ndef f(x,y):\n return 0.0\n# Condiciones de borde\ndef g1(x):\n return np.log(x**2 + 1)\ndef g2(x):\n return np.log(x**2 + 4)\ndef g3(y):\n return 2*np.log(y)\ndef g4(y):\n return np.log(y**2 + 1)\n# Puntos de la grilla\nm, n = 30, 30\n# Precálculo de m*n\nmn = m * n\n# Cantidad de steps\nM = m - 1\nN = n - 1\n# Limites del dominio, x_left, x_right, y_bottom, y_top\nxl, xr = (0.0, 1.0)\nyb, yt = (1.0, 2.0)\n# Tamaño de stepsize por dimensión\nh = (xr - xl) / float(M)\nk = (yt - yb) / float(N)\n# Precálculo de h**2 y k**2\nh2 = h**2.0\nk2 = k**2.0\n# Generar arreglos para dimension...\nx = [xl + (i - 1)*h for i in range(1, m+1)]\ny = [yb + (i - 1)*k for i in range(1, n+1)]\nA = np.zeros((mn, mn))\nb = np.zeros((mn))\nfor i in range(1, m-1):\n for j in range(1, n-1):\n A[i+(j-1)*m, i-1+(j-1)*m] = 1.0/h2\n A[i+(j-1)*m, i+1+(j-1)*m] = 1.0/h2\n \n A[i+(j-1)*m, i+(j-1)*m] = -2.0/h2 -2.0/k2\n \n A[i+(j-1)*m, i+(j-2)*m] = 1.0/k2\n A[i+(j-1)*m, i+j*m] = 1.0/k2\n \n b[i+(j-1)*m] = f(x[i], y[j])\nfor i in range(0,m):\n j = 0\n A[i+(j-1)*m, i+(j-1)*m] = 1.0\n b[i+(j-1)*m] = g1(x[i])\n j = n-1\n A[i+(j-1)*m, i+(j-1)*m] = 1.0\n b[i+(j-1)*m] = g2(x[i])\n \nfor j in range(1, n-1):\n i = 0\n A[i+(j-1)*m, i+(j-1)*m] = 1.0\n b[i+(j-1)*m] = g3(y[j])\n i = m-1\n A[i+(j-1)*m, i+(j-1)*m] = 1.0\n b[i+(j-1)*m] = g4(y[j])\nv = linalg.solve(A, b)\nw = np.reshape(v, (m,n))\nfig = plt.figure(figsize=(10,7))\nax = fig.add_subplot(111, projection='3d')\nxv, yv = np.meshgrid(x, y)\nax.plot_surface(xv, yv, w, rstride=1, cstride=1)\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.show()\nfig = plt.figure(figsize=(10,7))\nax = fig.add_subplot(111, projection='3d')\nxv, yv = np.meshgrid(x, y)\nzv = np.log(xv**2 + yv**2)\nax.plot_surface(xv, yv, zv, rstride=1, cstride=1)\nplt.show()\nExplanation: Luego debemos construir una matriz $A$ y un vector $b$ bajo esta nueva numeración tal que el sistema $Av=b$ sea resoluble y el resultado podamos trasladarlo de vuelta al sistema de $w_{ij}$. Esta matriz naturalmente será de tamaño $mn \\times mn$ y cada punto de la grilla tendrá su propia ecuación, como uno podría pensar.\nLa entrada $A_{pq}$ corresponde al $q$-ésimo coeficiente lineal de la $p$-ésima ecuación del sistema $Av =b$. Por ejemplo la ecuación\n$$\\frac{w_{i-1,j} -2w_{ij} + w_{i+1,j}}{h^2} + \\frac{w_{i,j-1}-2w_{ij}+ w_{i,j+1}}{k^2} = f(x_i, y_j)$$\nCorresponde a la ecuación para el punto $(i,j)$ de la grilla, y será la ecuación $p = i + (j-1)m$. Entonces, la entrada $A_{pq}$ no es más que, dada la conversión definida:\n\\begin{align}\nA_{i + (j-1)m, i + (j-1)m} &= -\\frac{2}{h^2}- \\frac{2}{k^2}\\\nA_{i + (j-1)m, i+1 + (j-1)m} &= \\frac{1}{h^2}\\\nA_{i + (j-1)m, i-1 + (j-1)m} &= \\frac{1}{h^2}\\\nA_{i + (j-1)m, i + jm} &= \\frac{1}{k^2}\\\nA_{i + (j-1)m, i + (j-2)m} &= \\frac{1}{k^2}\\\n\\end{align}\nAnálogamente los $b$ del lado derecho del sistema son, naturalmente, la función dada en el punto $(x_i, y_j)$\n$$b_{i+(j-1)m} = f(x_i, y_j)$$\nComo las condiciones de borde son conocidas estas ecuaciones excluyen dichos puntos, los índices $i,j$ van desde $1 < i < m$ y $1< j\n#\n# License: BSD-3-Clause\nimport matplotlib.pyplot as plt\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, compute_source_psd\nprint(__doc__)\nExplanation: Compute source power spectral density (PSD) in a label\nReturns an STC file containing the PSD (in dB) of each of the sources\nwithin a label.\nEnd of explanation\ndata_path = sample.data_path()\nmeg_path = data_path / 'MEG' / 'sample'\nraw_fname = meg_path / 'sample_audvis_raw.fif'\nfname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'\nfname_label = meg_path / 'labels' / 'Aud-lh.label'\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, verbose=False)\nevents = mne.find_events(raw, stim_channel='STI 014')\ninverse_operator = read_inverse_operator(fname_inv)\nraw.info['bads'] = ['MEG 2443', 'EEG 053']\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=False, exclude='bads')\ntmin, tmax = 0, 120 # use the first 120s of data\nfmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz\nn_fft = 2048 # the FFT size (n_fft). Ideally a power of 2\nlabel = mne.read_label(fname_label)\nstc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method=\"dSPM\",\n tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,\n pick_ori=\"normal\", n_fft=n_fft, label=label,\n dB=True)\nstc.save('psd_dSPM', overwrite=True)\nExplanation: Set parameters\nEnd of explanation\nplt.plot(stc.times, stc.data.T)\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('PSD (dB)')\nplt.title('Source Power Spectrum (PSD)')\nplt.show()\nExplanation: View PSD of sources in label\nEnd of explanation"}}},{"rowIdx":2195,"cells":{"Unnamed: 0":{"kind":"number","value":2195,"string":"2,195"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Copyright 2019 The TensorFlow Authors.\nStep1: 使用分布策略保存和加载模型\n\n \n \n \n \n
\n 在 TensorFlow.org 上查看在 Google Colab 中运行在 Github 上查看源代码下载笔记本
\n概述\n在训练期间一般需要保存和加载模型。有两组用于保存和加载 Keras 模型的 API:高级 API 和低级 API。本教程演示了在使用 tf.distribute.Strategy 时如何使用 SavedModel API。要了解 SavedModel 和序列化的相关概况,请参阅保存的模型指南和 Keras 模型序列化指南。让我们从一个简单的示例开始: \n导入依赖项:\nEnd of explanation\nmirrored_strategy = tf.distribute.MirroredStrategy()\ndef get_data():\n datasets, ds_info = tfds.load(name='mnist', with_info=True, as_supervised=True)\n mnist_train, mnist_test = datasets['train'], datasets['test']\n BUFFER_SIZE = 10000\n BATCH_SIZE_PER_REPLICA = 64\n BATCH_SIZE = BATCH_SIZE_PER_REPLICA * mirrored_strategy.num_replicas_in_sync\n def scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n return image, label\n train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE)\n return train_dataset, eval_dataset\ndef get_model():\n with mirrored_strategy.scope():\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10)\n ])\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=[tf.metrics.SparseCategoricalAccuracy()])\n return model\nExplanation: 使用 tf.distribute.Strategy 准备数据和模型:\nEnd of explanation\nmodel = get_model()\ntrain_dataset, eval_dataset = get_data()\nmodel.fit(train_dataset, epochs=2)\nExplanation: 训练模型:\nEnd of explanation\nkeras_model_path = \"/tmp/keras_save\"\nmodel.save(keras_model_path)\nExplanation: 保存和加载模型\n现在,您已经有一个简单的模型可供使用,让我们了解一下如何保存/加载 API。有两组可用的 API:\n高级 Keras model.save 和 tf.keras.models.load_model\n低级 tf.saved_model.save 和 tf.saved_model.load\nKeras API\n以下为使用 Keras API 保存和加载模型的示例:\nEnd of explanation\nrestored_keras_model = tf.keras.models.load_model(keras_model_path)\nrestored_keras_model.fit(train_dataset, epochs=2)\nExplanation: 恢复无 tf.distribute.Strategy 的模型:\nEnd of explanation\nanother_strategy = tf.distribute.OneDeviceStrategy(\"/cpu:0\")\nwith another_strategy.scope():\n restored_keras_model_ds = tf.keras.models.load_model(keras_model_path)\n restored_keras_model_ds.fit(train_dataset, epochs=2)\nExplanation: 恢复模型后,您可以继续在它上面进行训练,甚至无需再次调用 compile(),因为在保存之前已经对其进行了编译。模型以 TensorFlow 的标准 SavedModel proto 格式保存。有关更多信息,请参阅 saved_model 格式指南。\n现在,加载模型并使用 tf.distribute.Strategy 进行训练:\nEnd of explanation\nmodel = get_model() # get a fresh model\nsaved_model_path = \"/tmp/tf_save\"\ntf.saved_model.save(model, saved_model_path)\nExplanation: 如您所见, tf.distribute.Strategy 可以按预期进行加载。此处使用的策略不必与保存前所用策略相同。 \ntf.saved_model API\n现在,让我们看一下较低级别的 API。保存模型与 Keras API 类似:\nEnd of explanation\nDEFAULT_FUNCTION_KEY = \"serving_default\"\nloaded = tf.saved_model.load(saved_model_path)\ninference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]\nExplanation: 可以使用 tf.saved_model.load() 进行加载。但是,由于该 API 级别较低(因此用例范围更广泛),所以不会返回 Keras 模型。相反,它返回一个对象,其中包含可用于进行推断的函数。例如:\nEnd of explanation\npredict_dataset = eval_dataset.map(lambda image, label: image)\nfor batch in predict_dataset.take(1):\n print(inference_func(batch))\nExplanation: 加载的对象可能包含多个函数,每个函数与一个键关联。\"serving_default\" 是使用已保存的 Keras 模型的推断函数的默认键。要使用此函数进行推断,请运行以下代码:\nEnd of explanation\nanother_strategy = tf.distribute.MirroredStrategy()\nwith another_strategy.scope():\n loaded = tf.saved_model.load(saved_model_path)\n inference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]\n dist_predict_dataset = another_strategy.experimental_distribute_dataset(\n predict_dataset)\n # Calling the function in a distributed manner\n for batch in dist_predict_dataset:\n another_strategy.run(inference_func,args=(batch,))\nExplanation: 您还可以采用分布式方式加载和进行推断:\nEnd of explanation\nimport tensorflow_hub as hub\ndef build_model(loaded):\n x = tf.keras.layers.Input(shape=(28, 28, 1), name='input_x')\n # Wrap what's loaded to a KerasLayer\n keras_layer = hub.KerasLayer(loaded, trainable=True)(x)\n model = tf.keras.Model(x, keras_layer)\n return model\nanother_strategy = tf.distribute.MirroredStrategy()\nwith another_strategy.scope():\n loaded = tf.saved_model.load(saved_model_path)\n model = build_model(loaded)\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=[tf.metrics.SparseCategoricalAccuracy()])\n model.fit(train_dataset, epochs=2)\nExplanation: 调用已恢复的函数只是基于已保存模型的前向传递(预测)。如果您想继续训练加载的函数,或者将加载的函数嵌入到更大的模型中,应如何操作? 通常的做法是将此加载对象包装到 Keras 层以实现此目的。幸运的是,TF Hub 为此提供了 hub.KerasLayer,如下所示:\nEnd of explanation\nmodel = get_model()\n# Saving the model using Keras's save() API\nmodel.save(keras_model_path) \nanother_strategy = tf.distribute.MirroredStrategy()\n# Loading the model using lower level API\nwith another_strategy.scope():\n loaded = tf.saved_model.load(keras_model_path)\nExplanation: 如您所见,hub.KerasLayer 可将从 tf.saved_model.load() 加载回的结果封装到可用于构建其他模型的 Keras 层。这对于迁移学习非常实用。 \n我应使用哪种 API?\n对于保存,如果您使用的是 Keras 模型,那么始终建议使用 Keras 的 model.save() API。如果您保存的不是 Keras 模型,那么您只能选择使用较低级的 API。\n对于加载,使用哪种 API 取决于您要从加载的 API 中获得什么。如果您无法或不想获取 Keras 模型,请使用 tf.saved_model.load()。否则,请使用 tf.keras.models.load_model()。请注意,只有保存 Keras 模型后,才能恢复 Keras 模型。\n可以混合使用 API。您可以使用 model.save 保存 Keras 模型,并使用低级 API tf.saved_model.load 加载非 Keras 模型。\nEnd of explanation\nmodel = get_model()\n# Saving the model to a path on localhost.\nsaved_model_path = \"/tmp/tf_save\"\nsave_options = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')\nmodel.save(saved_model_path, options=save_options)\n# Loading the model from a path on localhost.\nanother_strategy = tf.distribute.MirroredStrategy()\nwith another_strategy.scope():\n load_options = tf.saved_model.LoadOptions(experimental_io_device='/job:localhost')\n loaded = tf.keras.models.load_model(saved_model_path, options=load_options)\nExplanation: 从本地设备保存/加载\n要在远程运行(例如使用 Cloud TPU)的情况下从本地 I/O 设备保存和加载,则必须使用选项 experimental_io_device 将 I/O 设备设置为本地主机。\nEnd of explanation\nclass SubclassedModel(tf.keras.Model):\n output_name = 'output_layer'\n def __init__(self):\n super(SubclassedModel, self).__init__()\n self._dense_layer = tf.keras.layers.Dense(\n 5, dtype=tf.dtypes.float32, name=self.output_name)\n def call(self, inputs):\n return self._dense_layer(inputs)\nmy_model = SubclassedModel()\n# my_model.save(keras_model_path) # ERROR! \ntf.saved_model.save(my_model, saved_model_path)\nExplanation: 警告\n有一种特殊情况,您的 Keras 模型没有明确定义的输入。例如,可以创建没有任何输入形状的序贯模型 (Sequential([Dense(3), ...])。子类化模型在初始化后也没有明确定义的输入。在这种情况下,在保存和加载时都应坚持使用较低级别的 API,否则会出现错误。\n要检查您的模型是否具有明确定义的输入,只需检查 model.inputs 是否为 None。如果非 None,则一切正常。在 .fit、.evaluate、.predict 中使用模型,或调用模型 (model(inputs)) 时,输入形状将自动定义。\n以下为示例:\nEnd of explanation"}}},{"rowIdx":2196,"cells":{"Unnamed: 0":{"kind":"number","value":2196,"string":"2,196"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\nStep1: Implementing a Neural Network\nIn this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.\nStep2: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.\nStep3: Forward pass\nStep4: Forward pass\nStep5: Backward pass\nImplement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check\nStep6: Train the network\nTo train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.\nOnce you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.\nStep8: Load the data\nNow that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.\nStep9: Train a network\nTo train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.\nStep10: Debug the training\nWith the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.\nOne strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.\nAnother strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.\nStep11: Tune your hyperparameters\nWhat's wrong?. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.\nTuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.\nApproximate results. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.\nExperiment\nStep12: Run on the test set\nWhen you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.\nWe will give you extra bonus point for every 1% of accuracy above 52%."},"code_prompt":{"kind":"string","value":"Python Code:\n# A bit of setup\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.neural_net import TwoLayerNet\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\ndef rel_error(x, y):\n returns relative error \n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\nExplanation: Implementing a Neural Network\nIn this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.\nEnd of explanation\n# Create a small net and some toy data to check your implementations.\n# Note that we set the random seed for repeatable experiments.\ninput_size = 4\nhidden_size = 10\nnum_classes = 3\nnum_inputs = 5\ndef init_toy_model():\n np.random.seed(0)\n return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)\ndef init_toy_data():\n np.random.seed(1)\n X = 10 * np.random.randn(num_inputs, input_size)\n y = np.array([0, 1, 2, 2, 1])\n return X, y\nnet = init_toy_model()\nX, y = init_toy_data()\nExplanation: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.\nEnd of explanation\nscores = net.loss(X)\nprint 'Your scores:'\nprint scores\nprint\nprint 'correct scores:'\ncorrect_scores = np.asarray([\n [-0.81233741, -1.27654624, -0.70335995],\n [-0.17129677, -1.18803311, -0.47310444],\n [-0.51590475, -1.01354314, -0.8504215 ],\n [-0.15419291, -0.48629638, -0.52901952],\n [-0.00618733, -0.12435261, -0.15226949]])\nprint correct_scores\nprint\n# The difference should be very small. We get < 1e-7\nprint 'Difference between your scores and correct scores:'\nprint np.sum(np.abs(scores - correct_scores))\nExplanation: Forward pass: compute scores\nOpen the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters. \nImplement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.\nEnd of explanation\nloss, _ = net.loss(X, y, reg=0.1)\ncorrect_loss = 1.30378789133\n# should be very small, we get < 1e-12\nprint 'Difference between your loss and correct loss:'\nprint np.sum(np.abs(loss - correct_loss))\nExplanation: Forward pass: compute loss\nIn the same function, implement the second part that computes the data and regularizaion loss.\nEnd of explanation\nfrom cs231n.gradient_check import eval_numerical_gradient\n# Use numeric gradient checking to check your implementation of the backward pass.\n# If your implementation is correct, the difference between the numeric and\n# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.\nloss, grads = net.loss(X, y, reg=0.1)\n# these should all be less than 1e-8 or so\nfor param_name in grads:\n f = lambda W: net.loss(X, y, reg=0.1)[0]\n param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)\n print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))\nExplanation: Backward pass\nImplement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:\nEnd of explanation\nnet = init_toy_model()\nstats = net.train(X, y, X, y,\n learning_rate=1e-1, reg=1e-5,\n num_iters=100, verbose=False)\nprint 'Final training loss: ', stats['loss_history'][-1]\n# plot the loss history\nplt.plot(stats['loss_history'])\nplt.xlabel('iteration')\nplt.ylabel('training loss')\nplt.title('Training Loss history')\nplt.show()\nExplanation: Train the network\nTo train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.\nOnce you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.\nEnd of explanation\nfrom cs231n.data_utils import load_CIFAR10\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n \n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. \n \n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n # Reshape data to rows\n X_train = X_train.reshape(num_training, -1)\n X_val = X_val.reshape(num_validation, -1)\n X_test = X_test.reshape(num_test, -1)\n return X_train, y_train, X_val, y_val, X_test, y_test\n# Invoke the above function to get our data.\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\nprint 'Train data shape: ', X_train.shape\nprint 'Train labels shape: ', y_train.shape\nprint 'Validation data shape: ', X_val.shape\nprint 'Validation labels shape: ', y_val.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', y_test.shape\nExplanation: Load the data\nNow that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.\nEnd of explanation\ninput_size = 32 * 32 * 3\nhidden_size = 50\nnum_classes = 10\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\n# Train the network\nstats = net.train(X_train, y_train, X_val, y_val,\n num_iters=1000, batch_size=200,\n learning_rate=1e-4, learning_rate_decay=0.95,\n reg=0.5, verbose=True)\n# Predict on the validation set\nval_acc = (net.predict(X_val) == y_val).mean()\nprint 'Validation accuracy: ', val_acc\nExplanation: Train a network\nTo train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.\nEnd of explanation\n# Plot the loss function and train / validation accuracies\nplt.subplot(2, 1, 1)\nplt.plot(stats['loss_history'])\nplt.title('Loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\nplt.subplot(2, 1, 2)\nplt.plot(stats['train_acc_history'], label='train')\nplt.plot(stats['val_acc_history'], label='val')\nplt.title('Classification accuracy history')\nplt.xlabel('Epoch')\nplt.ylabel('Clasification accuracy')\nplt.show()\nfrom cs231n.vis_utils import visualize_grid\n# Visualize the weights of the network\ndef show_net_weights(net):\n W1 = net.params['W1']\n W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)\n plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))\n plt.gca().axis('off')\n plt.show()\nshow_net_weights(net)\nExplanation: Debug the training\nWith the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.\nOne strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.\nAnother strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.\nEnd of explanation\nbest_net = None # store the best model into this \nlearning = [1e-5, 1e-3]\nregularization = [0, 1]\ndecay = [0.9, 1]\nresults = {}\nbest_val = -1\nfor num_hidden in np.arange(50, 300, 50):\n for _ in np.arange(0, 50):\n i = np.random.uniform(low=learning[0], high=learning[1])\n j = np.random.uniform(low=regularization[0], high=regularization[1])\n k = np.random.uniform(low=decay[0], high=decay[1])\n # Train the network\n net = TwoLayerNet(input_size, num_hidden, num_classes)\n stats = net.train(X_train, y_train, X_val, y_val,\n num_iters=500, batch_size=200,\n learning_rate=i, learning_rate_decay=k,\n reg=j, verbose=False)\n \n # Predict on the validation set\n val_acc = (net.predict(X_val) == y_val).mean()\n \n results[(num_hidden, i, j, k)] = val_acc\n if val_acc > best_val:\n best_val = val_acc\n# Print the obtained accuracies\nfor nh, lr, reg, dec in sorted(results):\n print 'Hidden: %d, learning rate: %f, regularisation: %f, decay: %f -> %f' % ( \\\n nh, lr, reg, dec, results[nh, lr, reg, dec])\n# Find the best learning rate and regularization strength\nbest_hidden = 25\nbest_lr = 0.000958\nbest_reg = 0.952745\nbest_decay = 0.935156\nbest_val = -1\nfor nh, lr, reg, dec in sorted(results):\n if results[(nh, lr, reg, dec)] > best_val:\n best_val = results[(nh, lr, reg, dec)]\n best_hidden = nh\n best_lr = lr\n best_reg = reg\n best_decay = dec\n# Train the best_svm with more iterations\nbest_net = TwoLayerNet(input_size, best_hidden, num_classes)\nstats = best_net.train(X_train, y_train, X_val, y_val,\n num_iters=2000, batch_size=200,\n learning_rate=best_lr, learning_rate_decay=best_decay,\n reg=best_reg, verbose=True)\n# Predict on the validation set\nval_acc = (net.predict(X_val) == y_val).mean()\nprint 'Best validation accuracy now: %f' % val_acc\n# visualize the weights of the best network\nshow_net_weights(best_net)\nExplanation: Tune your hyperparameters\nWhat's wrong?. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.\nTuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.\nApproximate results. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.\nExperiment: You goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).\nEnd of explanation\ntest_acc = (best_net.predict(X_test) == y_test).mean()\nprint 'Test accuracy: ', test_acc\nExplanation: Run on the test set\nWhen you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.\nWe will give you extra bonus point for every 1% of accuracy above 52%.\nEnd of explanation"}}},{"rowIdx":2197,"cells":{"Unnamed: 0":{"kind":"number","value":2197,"string":"2,197"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n This notebook uses mvncall to phase two multiallelic SNPs within VGSC and to add back in the insecticide resistance linked N1570Y SNP filtered out of the PASS callset.\nStep1: install mvncall\nmvncall depends on a couple of boost libraries, I installed these first using \"sudo apt-get install libboost-dev\"\nStep2: prepare input files\nStep3: list of SNPs to phase\nStep4: haplotype scaffold\nStep5: convert to numpy arrays\nso we can interleave these variants back into the genotype array easily\nStep6: check parsing..."},"code_prompt":{"kind":"string","value":"Python Code:\n%run setup.ipynb\nExplanation: This notebook uses mvncall to phase two multiallelic SNPs within VGSC and to add back in the insecticide resistance linked N1570Y SNP filtered out of the PASS callset.\nEnd of explanation\n%%bash --err install_err --out install_out\n# This script downloads and installs mvncall. We won't include this in \n# the standard install.sh script as this is not something we want to do\n# as part of continuous integration, it is only needed for this data \n# generation task.\nset -xeo pipefail\ncd ../dependencies\nif [ ! -f mvncall.installed ]; then\n echo installing mvncall\n \n # clean up\n rm -rvf mvncall*\n \n # download and unpack\n wget https://mathgen.stats.ox.ac.uk/genetics_software/mvncall/mvncall_v1.0_x86_64_dynamic.tgz\n tar zxvf mvncall_v1.0_x86_64_dynamic.tgz\n \n # trick mnvcall into finding boost libraries - their names aren't what mvncall expects\n locate libboost_iostreams | xargs -I '{}' ln -v -f -s '{}' libboost_iostreams.so.5\n locate libboost_program_options | xargs -I '{}' ln -v -f -s '{}' libboost_program_options.so.5\n \n # try running mvncall\n export LD_LIBRARY_PATH=.\n ./mvncall_v1.0_x86_64_dynamic/mvncall \n \n # mark success\n touch mvncall.installed\nelse\n echo mvncall already installed\n \nfi\n#check install\nprint(install_out)\n# check we can run mvncall\nmvncall = 'LD_LIBRARY_PATH=../dependencies ../dependencies/mvncall_v1.0_x86_64_dynamic/mvncall'\n!{mvncall}\nExplanation: install mvncall\nmvncall depends on a couple of boost libraries, I installed these first using \"sudo apt-get install libboost-dev\"\nEnd of explanation\n# these are the source data files for the phasing\nsample_file = '../ngs.sanger.ac.uk/production/ag1000g/phase1/AR3.1/haplotypes/main/shapeit/ag1000g.phase1.ar3.1.haplotypes.2L.sample.gz'\nvcf_file = '../ngs.sanger.ac.uk/production/ag1000g/phase1/AR3/variation/main/vcf/ag1000g.phase1.ar3.2L.vcf.gz'\nscaffold_file = '../ngs.sanger.ac.uk/production/ag1000g/phase1/AR3.1/haplotypes/main/shapeit/ag1000g.phase1.ar3.1.haplotypes.2L.haps.gz'\nExplanation: prepare input files\nEnd of explanation\n# this file will contain the list of SNPs to be phased\nlist_file = '../data/phasing_extra_phase1.list'\n%%file {list_file}\n2391228\n2400071\n2429745\n# for mvncall we need a simple manifest of sample IDs\n# N.B., we will exclude the cross parents\n!gunzip -v {sample_file} -c | head -n 767 | tail -n 765 | cut -d' ' -f1 > /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.sample\n!head /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.sample\n!tail /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.sample\n!wc -l /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.sample\nExplanation: list of SNPs to phase\nEnd of explanation\n# mvncall needs the haps unzipped. Also we will exclude the cross parents\n!if [ ! -f /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.haps ]; then gunzip -v {scaffold_file} -c | cut -d' ' -f1-1535 > /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.haps; fi\n# check cut has worked\n!head -n1 /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.haps\n# check cut has worked\n!head -n1 /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.haps | wc\n# mvncall needs an unzipped VCF, we'll extract only the region we need\nregion_vgsc = SeqFeature('2L', 2358158, 2431617)\nregion_vgsc.region_str\n# extract the VCF\n!bcftools view -r {region_vgsc.region_str} --output-file /tmp/vgsc.vcf --output-type v {vcf_file}\n%%bash\nfor numsnps in 50 100 200; do\n echo $numsnps\ndone\n%%bash\n# run mvncall, only if output file doesn't exist (it's slow)\nmvncall=\"../dependencies/mvncall_v1.0_x86_64_dynamic/mvncall\"\nexport LD_LIBRARY_PATH=../dependencies\nfor numsnps in 50 100 200; do\n output_file=../data/phasing_extra_phase1.mvncall.${numsnps}.vcf\n if [ ! -f $output_file ]; then \n echo running mvncall $numsnps\n $mvncall \\\n --sample-file /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.sample \\\n --glfs /tmp/vgsc.vcf \\\n --scaffold-file /tmp/ag1000g.phase1.ar3.1.haplotypes.2L.haps \\\n --list ../data/phasing_extra_phase1.list \\\n --numsnps $numsnps \\\n --o $output_file > /tmp/mvncall.${numsnps}.log \n else\n echo skipping mvncall $numsnps\n fi\n \ndone\n!tail /tmp/mvncall.100.log\n!cat ../data/phasing_extra_phase1.mvncall.50.vcf\n!ls -lh ../data/*.mvncall*\nExplanation: haplotype scaffold\nEnd of explanation\ndef vcf_to_numpy(numsnps):\n \n # input VCF filename\n vcf_fn = '../data/phasing_extra_phase1.mvncall.{}.vcf'.format(numsnps)\n \n # extract variants\n variants = vcfnp.variants(vcf_fn, cache=False,\n dtypes={'REF': 'S1', 'ALT': 'S1'},\n flatten_filter=True)\n # fix the chromosome\n variants['CHROM'] = (b'2L',) * len(variants)\n # extract calldata\n calldata = vcfnp.calldata_2d(vcf_fn, cache=False,\n fields=['genotype', 'GT', 'is_phased'])\n # N.B., there is a trailing tab character somewhere in the input VCFs (samples line?) \n # which means an extra sample gets added when parsing. Hence we will trim off the last\n # field.\n calldata = calldata[:, :-1]\n \n # save output\n output_fn = vcf_fn[:-3] + 'npz'\n np.savez_compressed(output_fn, variants=variants, calldata=calldata)\nfor numsnps in 50, 100, 200:\n vcf_to_numpy(numsnps)\nExplanation: convert to numpy arrays\nso we can interleave these variants back into the genotype array easily\nEnd of explanation\ncallset = np.load('../data/phasing_extra_phase1.mvncall.200.npz')\ncallset\nvariants = callset['variants']\nallel.VariantTable(variants)\ncalldata = callset['calldata']\ng = allel.GenotypeArray(calldata['genotype'])\ng.is_phased = calldata['is_phased']\ng.displayall()\nExplanation: check parsing...\nEnd of explanation"}}},{"rowIdx":2198,"cells":{"Unnamed: 0":{"kind":"number","value":2198,"string":"2,198"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Read data\nStep1: Add features\nStep2: Feature pdfs\nStep3: One versus One\nPrepare datasets\nStep4: Prepare stacking variables\nStep5: Multiclassification"},"code_prompt":{"kind":"string","value":"Python Code:\ntreename = 'tag'\ndata_b = pandas.DataFrame(root_numpy.root2array('datasets/type=5.root', treename=treename)).dropna()\ndata_b = data_b[::40]\ndata_c = pandas.DataFrame(root_numpy.root2array('datasets/type=4.root', treename=treename)).dropna()\ndata_light = pandas.DataFrame(root_numpy.root2array('datasets/type=0.root', treename=treename)).dropna()\ndata = {'b': data_b, 'c': data_c, 'light': data_light}\njet_features = [column for column in data_b.columns if \"Jet\" in column]\nsv_features = [column for column in data_b.columns if \"SV\" in column]\nprint \"Jet features\", \", \".join(jet_features)\nprint \"SV features\", \", \".join(sv_features)\nExplanation: Read data\nEnd of explanation\nfor d in data.values():\n d['log_SVFDChi2'] = numpy.log(d['SVFDChi2'].values)\n d['log_SVSumIPChi2'] = numpy.log(d['SVSumIPChi2'].values)\n d['SVM_diff'] = numpy.log(d['SVMC'] ** 2 - d['SVM']**2)\n d['SVM_rel'] = numpy.tanh(d['SVM'] / d['SVMC'])\n d['SVM_rel2'] = (d['SVM'] / d['SVMC'])**2\n d['SVR_rel'] = d['SVDR'] / (d['SVR'] + 1e-5)\n d['R_FD_rel'] = numpy.tanh(d['SVR'] / d['SVFDChi2'])\n d['jetP'] = numpy.sqrt(d['JetPx'] ** 2 + d['JetPy'] ** 2 + d['JetPz'] ** 2)\n d['jetPt'] = numpy.sqrt(d['JetPx'] ** 2 + d['JetPy'] ** 2)\n d['jetM'] = numpy.sqrt(d['JetE'] ** 2 - d['jetP'] ** 2 )\n d['SV_jet_M_rel'] = d['SVM'] / d['jetM']\n d['SV_jet_MC_rel'] = d['SVMC'] / d['jetM']\n \n# full_data['P_Sin'] = 0.5 * d['SVMC'].values - (d['SVM'].values)**2 / (2. * d['SVMC'].values)\n# full_data['Psv'] = d['SVPT'].values * d['P_Sin'].values\n# full_data['Psv2'] = d['P_Sin'].values / d['SVPT'].values\n# full_data['Mt'] = d['SVMC'].values - d['P_Sin'].values\n# full_data['QtoN'] = 1. * d['SVQ'].values / d['SVN'].values\ndata_b = data_b.drop(['JetParton', 'JetFlavor', 'JetPx', 'JetPy'], axis=1)\ndata_c = data_c.drop(['JetParton', 'JetFlavor', 'JetPx', 'JetPy'], axis=1)\ndata_light = data_light.drop(['JetParton', 'JetFlavor', 'JetPx', 'JetPy'], axis=1)\njet_features = [column for column in data_b.columns if \"Jet\" in column]\nadditional_features = ['log_SVFDChi2', 'log_SVSumIPChi2', \n 'SVM_diff', 'SVM_rel', 'SVR_rel', 'SVM_rel2', 'SVR_rel', 'R_FD_rel',\n 'jetP', 'jetPt', 'jetM', 'SV_jet_M_rel', 'SV_jet_MC_rel']\nExplanation: Add features\nEnd of explanation\nfigsize(18, 60)\nfor i, feature in enumerate(data_b.columns):\n subplot(len(data_b.columns) / 3, 3, i)\n hist(data_b[feature].values, label='b', alpha=0.2, bins=60, normed=True)\n hist(data_c[feature].values, label='c', alpha=0.2, bins=60, normed=True)\n# hist(data_light[feature].values, label='light', alpha=0.2, bins=60, normed=True)\n xlabel(feature); legend(loc='best'); \n title(roc_auc_score([0] * len(data_b) + [1]*len(data_c),\n numpy.hstack([data_b[feature].values, data_c[feature].values])))\nlen(data_b), len(data_c), len(data_light)\njet_features = jet_features[2:]\nExplanation: Feature pdfs\nEnd of explanation\ndata_b_c_lds = LabeledDataStorage(pandas.concat([data_b, data_c]), [1] * len(data_b) + [0] * len(data_c))\ndata_c_light_lds = LabeledDataStorage(pandas.concat([data_c, data_light]), [1] * len(data_c) + [0] * len(data_light))\ndata_b_light_lds = LabeledDataStorage(pandas.concat([data_b, data_light]), [1] * len(data_b) + [0] * len(data_light))\ndef one_vs_one_training(base_estimators, data_b_c_lds, data_c_light_lds, data_b_light_lds, full_data, \n prefix='bdt', folding=True, features=None):\n if folding: \n tt_folding_b_c = FoldingClassifier(base_estimators[0], n_folds=2, random_state=11, parallel_profile=PROFILE, \n features=features)\n tt_folding_c_light = FoldingClassifier(base_estimators[1], n_folds=2, random_state=11, parallel_profile=PROFILE, \n features=features)\n tt_folding_b_light = FoldingClassifier(base_estimators[2], n_folds=2, random_state=11, parallel_profile=PROFILE, \n features=features)\n else:\n tt_folding_b_c = base_estimators[0]\n tt_folding_b_c.features = features\n tt_folding_c_light = base_estimators[1]\n tt_folding_c_light.features = features\n tt_folding_b_light = base_estimators[2]\n tt_folding_b_light.features = features\n \n %time tt_folding_b_c.fit_lds(data_b_c_lds)\n \n %time tt_folding_c_light.fit_lds(data_c_light_lds)\n %time tt_folding_b_light.fit_lds(data_b_light_lds)\n bdt_b_c = numpy.concatenate([tt_folding_b_c.predict_proba(pandas.concat([data_b, data_c])),\n tt_folding_b_c.predict_proba(data_light)])[:, 1]\n bdt_c_light = numpy.concatenate([tt_folding_c_light.predict_proba(data_b), \n tt_folding_c_light.predict_proba(pandas.concat([data_c, data_light]))])[:, 1]\n p_b_light = tt_folding_b_light.predict_proba(pandas.concat([data_b, data_light]))[:, 1]\n bdt_b_light = numpy.concatenate([p_b_light[:len(data_b)], tt_folding_b_light.predict_proba(data_c)[:, 1], \n p_b_light[len(data_b):]])\n \n full_data[prefix + '_b_c'] = bdt_b_c\n full_data[prefix + '_b_light'] = bdt_b_light\n full_data[prefix + '_c_light'] = bdt_c_light\nExplanation: One versus One\nPrepare datasets:\nb vs c\nb vs light\nc vs light\nEnd of explanation\nfull_data = pandas.concat([data_b, data_c, data_light])\nfull_data['label'] = [0] * len(data_b) + [1] * len(data_c) + [2] * len(data_light)\nfrom hep_ml.nnet import MLPClassifier\nfrom rep.estimators import SklearnClassifier\none_vs_one_training([SklearnClassifier(MLPClassifier(layers=(30, 10), epochs=700, random_state=11))]*3, \n data_b_c_lds, data_c_light_lds, data_b_light_lds, full_data, 'mlp', folding=True,\n features=sv_features + additional_features + jet_features)\nfrom sklearn.linear_model import LogisticRegression\none_vs_one_training([LogisticRegression()]*3, \n data_b_c_lds, data_c_light_lds, data_b_light_lds, full_data, \n 'logistic', folding=True, features=sv_features + additional_features + jet_features)\n# from sklearn.svm import SVC\n# from sklearn.pipeline import make_pipeline\n# from sklearn.preprocessing import StandardScaler\n# svm_feat = SklearnClassifier(make_pipeline(StandardScaler(), SVC(probability=True)), features=sv_features)\n# %time svm_feat.fit(data_b_c_lds.data, data_b_c_lds.target)\n# from sklearn.neighbors import KNeighborsClassifier\n# one_vs_one_training([KNeighborsClassifier(metric='canberra')]*3, \n# data_b_c_lds, data_c_light_lds, data_b_light_lds, full_data, \n# 'knn', folding=True, features=sv_features)\n# from rep.estimators import TheanetsClassifier\n# theanets_base = TheanetsClassifier(layers=(20, 10), trainers=[{'algo': 'adadelta', 'learining_rate': 0.1}, ])\n# nn = FoldingClassifier(theanets_base, features=sv_features, random_state=11, parallel_profile='ssh-py2')\n# nn.fit(full_data, full_data.label)\n# multi_probs = nn.predict_proba(full_data)\n# full_data['th_0'] = multi_probs[:, 0] / multi_probs[:, 1] \n# full_data['th_1'] = multi_probs[:, 0] / multi_probs[:, 2] \n# full_data['th_2'] = multi_probs[:, 1] / multi_probs[:, 2] \nmlp_features = ['mlp_b_c', 'mlp_b_light', 'mlp_c_light']\n# knn_features = ['knn_b_c', 'knn_b_light', 'knn_c_light']\n# th_features = ['th_0', 'th_1', 'th_2']\nlogistic_features = ['logistic_b_c', 'logistic_b_light', 'logistic_c_light']\nExplanation: Prepare stacking variables\nEnd of explanation\ndata_multi_lds = LabeledDataStorage(full_data, 'label')\nvariables_final = set(sv_features + additional_features + jet_features + mlp_features)\n# variables_final = list(variables_final - {'SVN', 'SVQ', 'log_SVFDChi2', 'log_SVSumIPChi2', 'SVM_rel2', 'JetE', 'JetNDis'})\nfrom rep.estimators import XGBoostClassifier\nxgb_base = XGBoostClassifier(n_estimators=3000, colsample=0.7, eta=0.005, nthreads=8, \n subsample=0.7, max_depth=6)\nmulti_folding_rbf = FoldingClassifier(xgb_base, n_folds=2, random_state=11, \n features=variables_final)\n%time multi_folding_rbf.fit_lds(data_multi_lds)\nmulti_probs = multi_folding_rbf.predict_proba(full_data)\n'log loss', -numpy.log(multi_probs[numpy.arange(len(multi_probs)), full_data['label']]).sum() / len(full_data)\nmulti_folding_rbf.get_feature_importances()\nlabels = full_data['label'].values.astype(int)\nmulticlass_result = generate_result(1 - roc_auc_score(labels > 0, multi_probs[:, 0] / multi_probs[:, 1], \n sample_weight=(labels != 2) * 1),\n 1 - roc_auc_score(labels > 1, multi_probs[:, 0] / multi_probs[:, 2],\n sample_weight=(labels != 1) * 1),\n 1 - roc_auc_score(labels > 1, multi_probs[:, 1] / multi_probs[:, 2],\n sample_weight=(labels != 0) * 1),\n label='multiclass')\nresult = pandas.concat([multiclass_result])\nresult.index = result['name']\nresult = result.drop('name', axis=1)\nresult\nExplanation: Multiclassification\nEnd of explanation"}}},{"rowIdx":2199,"cells":{"Unnamed: 0":{"kind":"number","value":2199,"string":"2,199"},"text_prompt":{"kind":"string","value":"Given the following text description, write Python code to implement the functionality described below step by step\nDescription:\n Setup data\nWe're going to look at the IMDB dataset, which contains movie reviews from IMDB, along with their sentiment. Keras comes with some helpers for this dataset.\nStep1: This is the word list\nStep2: ...and this is the mapping from id to word\nStep3: We download the reviews using code copied from keras.datasets\nStep4: Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.\nStep5: The first word of the first review is 23022. Let's see what that is.\nStep6: Here's the whole review, mapped from ids to words.\nStep7: The labels are 1 for positive, 0 for negative.\nStep8: Reduce vocab size by setting rare words to max index.\nStep9: Look at distribution of lengths of sentences.\nStep10: Pad (with zero) or truncate each sentence to make consistent length.\nStep11: This results in nice rectangular matrices that can be passed to ML algorithms. Reviews shorter than 500 words are pre-padded with zeros, those greater are truncated.\nStep12: Create simple models\nSingle hidden layer NN\nThe simplest model that tends to give reasonable results is a single hidden layer net. So let's try that. Note that we can't expect to get any useful results by feeding word ids directly into a neural net - so instead we use an embedding to replace them with a vector of 32 (initially random) floats for each word in the vocab.\nStep13: The stanford paper that this dataset is from cites a state of the art accuracy (without unlabelled data) of 0.883. So we're short of that, but on the right track.\nSingle conv layer with max pooling\nA CNN is likely to work better, since it's designed to take advantage of ordered data. We'll need to use a 1D CNN, since a sequence of words is 1D.\nStep14: That's well past the Stanford paper's accuracy - another win for CNNs!\nStep16: Pre-trained vectors\nYou may want to look at wordvectors.ipynb before moving on.\nIn this section, we replicate the previous CNN, but using pre-trained embeddings.\nStep17: The glove word ids and imdb word ids use different indexes. So we create a simple function that creates an embedding matrix using the indexes from imdb, and the embeddings from glove (where they exist).\nStep18: We pass our embedding matrix to the Embedding constructor, and set it to non-trainable.\nStep19: We already have beaten our previous model! But let's fine-tune the embedding weights - especially since the words we couldn't find in glove just have random embeddings.\nStep20: As expected, that's given us a nice little boost. \nStep21: Multi-size CNN\nThis is an implementation of a multi-size CNN as shown in Ben Bowles' excellent blog post.\nStep22: We use the functional API to create multiple conv layers of different sizes, and then concatenate them.\nStep23: We then replace the conv/max-pool layer in our original CNN with the concatenated conv layers.\nStep24: Interestingly, I found that in this case I got best results when I started the embedding layer as being trainable, and then set it to non-trainable after a couple of epochs. I have no idea why!\nStep25: This more complex architecture has given us another boost in accuracy.\nLSTM\nWe haven't covered this bit yet!"},"code_prompt":{"kind":"string","value":"Python Code:\nfrom keras.datasets import imdb\nidx = imdb.get_word_index()\nExplanation: Setup data\nWe're going to look at the IMDB dataset, which contains movie reviews from IMDB, along with their sentiment. Keras comes with some helpers for this dataset.\nEnd of explanation\nidx_arr = sorted(idx, key=idx.get)\nidx_arr[:10]\nExplanation: This is the word list:\nEnd of explanation\nidx2word = {v: k for k, v in idx.iteritems()}\nExplanation: ...and this is the mapping from id to word\nEnd of explanation\npath = get_file('imdb_full.pkl',\n origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',\n md5_hash='d091312047c43cf9e4e38fef92437263')\nf = open(path, 'rb')\n(x_train, labels_train), (x_test, labels_test) = pickle.load(f)\nlen(x_train)\nExplanation: We download the reviews using code copied from keras.datasets:\nEnd of explanation\n', '.join(map(str, x_train[0]))\nExplanation: Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.\nEnd of explanation\nidx2word[23022]\nExplanation: The first word of the first review is 23022. Let's see what that is.\nEnd of explanation\n' '.join([idx2word[o] for o in x_train[0]])\nExplanation: Here's the whole review, mapped from ids to words.\nEnd of explanation\nlabels_train[:10]\nExplanation: The labels are 1 for positive, 0 for negative.\nEnd of explanation\nvocab_size = 5000\ntrn = [np.array([i if i