{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\"\"\"\nhtml_splitter = RecursiveCharacterTextSplitter.from_language(\n language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0\n)\nhtml_docs = html_splitter.create_documents([html_text])\nhtml_docs\n[Document(page_content='\\n\\n ', metadata={}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html"}}},{"rowIdx":1516,"cells":{"id":{"kind":"string","value":"44526870d883-5"},"text":{"kind":"string","value":"Document(page_content='🦜️🔗 LangChain\\n \\n \\n \\n
', metadata={}),\n Document(page_content='

🦜️🔗 LangChain

', metadata={}),\n Document(page_content='

⚡ Building applications with LLMs through', metadata={}),\n Document(page_content='composability ⚡

', metadata={}),\n Document(page_content='
\\n
', metadata={}),\n Document(page_content='As an open source project in a rapidly', metadata={}),\n Document(page_content='developing field, we are extremely open to contributions.', metadata={}),\n Document(page_content='
\\n \\n', metadata={})]\nprevious\nCharacter\nnext\nNLTK\n Contents\n \nPython\nJS\nSolidity\nMarkdown\nLatex\nHTML\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html"}}},{"rowIdx":1517,"cells":{"id":{"kind":"string","value":"de368ca32158-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nspaCy\nspaCy#\nspaCy is an open-source software library for advanced natural language processing, written in the programming languages Python and Cython.\nAnother alternative to NLTK is to use Spacy tokenizer.\nHow the text is split: by spaCy tokenizer\nHow the chunk size is measured: by number of characters\n#!pip install spacy\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import SpacyTextSplitter\ntext_splitter = SpacyTextSplitter(chunk_size=1000)\ntexts = text_splitter.split_text(state_of_the_union)\nprint(texts[0])\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman.\nMembers of Congress and the Cabinet.\nJustices of the Supreme Court.\nMy fellow Americans. \nLast year COVID-19 kept us apart.\nThis year we are finally together again. \nTonight, we meet as Democrats Republicans and Independents.\nBut most importantly as Americans. \nWith a duty to one another to the American people to the Constitution. \nAnd with an unwavering resolve that freedom will always triumph over tyranny. \nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways.\nBut he badly miscalculated. \nHe thought he could roll into Ukraine and the world would roll over.\nInstead he met a wall of strength he never imagined. \nHe met the Ukrainian people. 
\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.\nprevious\nRecursive Character\nnext\nTiktoken\nBy Harrison Chase"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/spacy.html"}}},{"rowIdx":1518,"cells":{"id":{"kind":"string","value":"de368ca32158-1"},"text":{"kind":"string","value":"previous\nRecursive Character\nnext\nTiktoken\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/spacy.html"}}},{"rowIdx":1519,"cells":{"id":{"kind":"string","value":"663fb253628f-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nTiktoken\nTiktoken#\ntiktoken is a fast BPE tokeniser created by OpenAI.\nHow the text is split: by tiktoken tokens\nHow the chunk size is measured: by tiktoken tokens\n#!pip install tiktoken\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import TokenTextSplitter\ntext_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)\ntexts = text_splitter.split_text(state_of_the_union)\nprint(texts[0])\nMadam Speaker, Madam Vice President, our\nprevious\nspaCy\nnext\nHugging Face tokenizer\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/tiktoken_splitter.html"}}},{"rowIdx":1520,"cells":{"id":{"kind":"string","value":"785ac476db93-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nHugging Face tokenizer\nHugging Face tokenizer#\nHugging Face has many tokenizers.\nWe use Hugging Face tokenizer, the GPT2TokenizerFast to count the text length in tokens.\nHow the text is split: 
by character passed in\nHow the chunk size is measured: by number of tokens calculated by the Hugging Face tokenizer\nfrom transformers import GPT2TokenizerFast\ntokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import CharacterTextSplitter\ntext_splitter = CharacterTextSplitter.from_huggingface_tokenizer(tokenizer, chunk_size=100, chunk_overlap=0)\ntexts = text_splitter.split_text(state_of_the_union)\nprint(texts[0])\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \nLast year COVID-19 kept us apart. This year we are finally together again. \nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \nWith a duty to one another to the American people to the Constitution.\nprevious\nTiktoken\nnext\ntiktoken (OpenAI) tokenizer\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/huggingface_length_function.html"}}},{"rowIdx":1521,"cells":{"id":{"kind":"string","value":"17aca0b5d2a3-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nRecursive Character\nRecursive Character#\nThis text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is [\"\\n\\n\", \"\\n\", \" \", \"\"]. 
This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\nHow the text is split: by list of characters\nHow the chunk size is measured: by number of characters\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\ntext_splitter = RecursiveCharacterTextSplitter(\n # Set a really small chunk size, just to show.\n chunk_size = 100,\n chunk_overlap = 20,\n length_function = len,\n)\ntexts = text_splitter.create_documents([state_of_the_union])\nprint(texts[0])\nprint(texts[1])\npage_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and' lookup_str='' metadata={} lookup_index=0\npage_content='of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.' lookup_str='' metadata={} lookup_index=0\ntext_splitter.split_text(state_of_the_union)[:2]\n['Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and',\n 'of Congress and the Cabinet. Justices of the Supreme Court. 
My fellow Americans.']\nprevious\nNLTK\nnext\nspaCy\nBy Harrison Chase"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/recursive_text_splitter.html"}}},{"rowIdx":1522,"cells":{"id":{"kind":"string","value":"17aca0b5d2a3-1"},"text":{"kind":"string","value":"previous\nNLTK\nnext\nspaCy\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/recursive_text_splitter.html"}}},{"rowIdx":1523,"cells":{"id":{"kind":"string","value":"a040b66c6ffb-0"},"text":{"kind":"string","value":".ipynb\n.pdf\ntiktoken (OpenAI) tokenizer\ntiktoken (OpenAI) tokenizer#\ntiktoken is a fast BPE tokenizer created by OpenAI.\nWe can use it to estimate tokens used. It will probably be more accurate for the OpenAI models.\nHow the text is split: by character passed in\nHow the chunk size is measured: by tiktoken tokenizer\n#!pip install tiktoken\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import CharacterTextSplitter\ntext_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=100, chunk_overlap=0)\ntexts = text_splitter.split_text(state_of_the_union)\nprint(texts[0])\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \nLast year COVID-19 kept us apart. This year we are finally together again. \nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. 
\nWith a duty to one another to the American people to the Constitution.\nprevious\nHugging Face tokenizer\nnext\nVectorstores\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/tiktoken.html"}}},{"rowIdx":1524,"cells":{"id":{"kind":"string","value":"ee7c634c6fbd-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nNLTK\nNLTK#\nThe Natural Language Toolkit, or more commonly NLTK, is a suite of libraries and programs for symbolic and statistical natural language processing (NLP) for English written in the Python programming language.\nRather than just splitting on “\\n\\n”, we can use NLTK to split based on NLTK tokenizers.\nHow the text is split: by NLTK tokenizer.\nHow the chunk size is measured:by number of characters\n#pip install nltk\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import NLTKTextSplitter\ntext_splitter = NLTKTextSplitter(chunk_size=1000)\ntexts = text_splitter.split_text(state_of_the_union)\nprint(texts[0])\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman.\nMembers of Congress and the Cabinet.\nJustices of the Supreme Court.\nMy fellow Americans.\nLast year COVID-19 kept us apart.\nThis year we are finally together again.\nTonight, we meet as Democrats Republicans and Independents.\nBut most importantly as Americans.\nWith a duty to one another to the American people to the Constitution.\nAnd with an unwavering resolve that freedom will always triumph over tyranny.\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways.\nBut he badly miscalculated.\nHe thought he could roll into Ukraine and the world would roll over.\nInstead he met a wall of strength he never 
imagined.\nHe met the Ukrainian people.\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/nltk.html"}}},{"rowIdx":1525,"cells":{"id":{"kind":"string","value":"ee7c634c6fbd-1"},"text":{"kind":"string","value":"Groups of citizens blocking tanks with their bodies.\nprevious\nCodeTextSplitter\nnext\nRecursive Character\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/nltk.html"}}},{"rowIdx":1526,"cells":{"id":{"kind":"string","value":"d7922252429a-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nCharacter\nCharacter#\nThis is the simplest method. This splits based on characters (by default “\\n\\n”) and measure chunk length by number of characters.\nHow the text is split: by single character\nHow the chunk size is measured: by number of characters\n# This is a long document we can split up.\nwith open('../../../state_of_the_union.txt') as f:\n state_of_the_union = f.read()\nfrom langchain.text_splitter import CharacterTextSplitter\ntext_splitter = CharacterTextSplitter( \n separator = \"\\n\\n\",\n chunk_size = 1000,\n chunk_overlap = 200,\n length_function = len,\n)\ntexts = text_splitter.create_documents([state_of_the_union])\nprint(texts[0])"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html"}}},{"rowIdx":1527,"cells":{"id":{"kind":"string","value":"d7922252429a-1"},"text":{"kind":"string","value":"print(texts[0])\npage_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. 
\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' lookup_str='' metadata={} lookup_index=0\nHere’s an example of passing metadata along with the documents, notice that it is split along with the documents.\nmetadatas = [{\"document\": 1}, {\"document\": 2}]\ndocuments = text_splitter.create_documents([state_of_the_union, state_of_the_union], metadatas=metadatas)\nprint(documents[0])"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html"}}},{"rowIdx":1528,"cells":{"id":{"kind":"string","value":"d7922252429a-2"},"text":{"kind":"string","value":"print(documents[0])\npage_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. 
\\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' lookup_str='' metadata={'document': 1} lookup_index=0\ntext_splitter.split_text(state_of_the_union)[0]"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html"}}},{"rowIdx":1529,"cells":{"id":{"kind":"string","value":"d7922252429a-3"},"text":{"kind":"string","value":"text_splitter.split_text(state_of_the_union)[0]\n'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. 
\\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.'\nprevious\nGetting Started\nnext\nCodeTextSplitter\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html"}}},{"rowIdx":1530,"cells":{"id":{"kind":"string","value":"a16d2d34b47f-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nElasticSearch BM25\n Contents \nCreate New Retriever\nAdd texts (if necessary)\nUse Retriever\nElasticSearch BM25#\nElasticsearch is a distributed, RESTful search and analytics engine. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents.\nIn information retrieval, Okapi BM25 (BM is an abbreviation of best matching) is a ranking function used by search engines to estimate the relevance of documents to a given search query. It is based on the probabilistic retrieval framework developed in the 1970s and 1980s by Stephen E. Robertson, Karen Spärck Jones, and others.\nThe name of the actual ranking function is BM25. The fuller name, Okapi BM25, includes the name of the first system to use it, which was the Okapi information retrieval system, implemented at London’s City University in the 1980s and 1990s. BM25 and its newer variants, e.g. 
BM25F (a version of BM25 that can take document structure and anchor text into account), represent TF-IDF-like retrieval functions used in document retrieval.\nThis notebook shows how to use a retriever that uses ElasticSearch and BM25.\nFor more information on the details of BM25 see this blog post.\n#!pip install elasticsearch\nfrom langchain.retrievers import ElasticSearchBM25Retriever\nCreate New Retriever#\nelasticsearch_url=\"http://localhost:9200\"\nretriever = ElasticSearchBM25Retriever.create(elasticsearch_url, \"langchain-index-4\")\n# Alternatively, you can load an existing index\n# import elasticsearch\n# elasticsearch_url=\"http://localhost:9200\""},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/elastic_search_bm25.html"}}},{"rowIdx":1531,"cells":{"id":{"kind":"string","value":"a16d2d34b47f-1"},"text":{"kind":"string","value":"# import elasticsearch\n# elasticsearch_url=\"http://localhost:9200\"\n# retriever = ElasticSearchBM25Retriever(elasticsearch.Elasticsearch(elasticsearch_url), \"langchain-index\")\nAdd texts (if necessary)#\nWe can optionally add texts to the retriever (if they aren’t already in there)\nretriever.add_texts([\"foo\", \"bar\", \"world\", \"hello\", \"foo bar\"])\n['cbd4cb47-8d9f-4f34-b80e-ea871bc49856',\n 'f3bd2e24-76d1-4f9b-826b-ec4c0e8c7365',\n '8631bfc8-7c12-48ee-ab56-8ad5f373676e',\n '8be8374c-3253-4d87-928d-d73550a2ecf0',\n 'd79f457b-2842-4eab-ae10-77aa420b53d7']\nUse Retriever#\nWe can now use the retriever!\nresult = retriever.get_relevant_documents(\"foo\")\nresult\n[Document(page_content='foo', metadata={}),\n Document(page_content='foo bar', metadata={})]\nprevious\nDataberry\nnext\nkNN\n Contents\n \nCreate New Retriever\nAdd texts (if necessary)\nUse Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 
2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/elastic_search_bm25.html"}}},{"rowIdx":1532,"cells":{"id":{"kind":"string","value":"fec7d90c114b-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nTime Weighted VectorStore\n Contents \nLow Decay Rate\nHigh Decay Rate\nVirtual Time\nTime Weighted VectorStore#\nThis retriever uses a combination of semantic similarity and a time decay.\nThe algorithm for scoring them is:\nsemantic_similarity + (1.0 - decay_rate) ** hours_passed\nNotably, hours_passed refers to the hours passed since the object in the retriever was last accessed, not since it was created. This means that frequently accessed objects remain “fresh.”\nimport faiss\nfrom datetime import datetime, timedelta\nfrom langchain.docstore import InMemoryDocstore\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.retrievers import TimeWeightedVectorStoreRetriever\nfrom langchain.schema import Document\nfrom langchain.vectorstores import FAISS\nLow Decay Rate#\nA low decay rate (in this, to be extreme, we will set close to 0) means memories will be “remembered” for longer. 
A decay rate of 0 means memories never be forgotten, making this retriever equivalent to the vector lookup.\n# Define your embedding model\nembeddings_model = OpenAIEmbeddings()\n# Initialize the vectorstore as empty\nembedding_size = 1536\nindex = faiss.IndexFlatL2(embedding_size)\nvectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})\nretriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.0000000000000000000000001, k=1) \nyesterday = datetime.now() - timedelta(days=1)\nretriever.add_documents([Document(page_content=\"hello world\", metadata={\"last_accessed_at\": yesterday})])\nretriever.add_documents([Document(page_content=\"hello foo\")])"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html"}}},{"rowIdx":1533,"cells":{"id":{"kind":"string","value":"fec7d90c114b-1"},"text":{"kind":"string","value":"retriever.add_documents([Document(page_content=\"hello foo\")])\n['d7f85756-2371-4bdf-9140-052780a0f9b3']\n# \"Hello World\" is returned first because it is most salient, and the decay rate is close to 0., meaning it's still recent enough\nretriever.get_relevant_documents(\"hello world\")\n[Document(page_content='hello world', metadata={'last_accessed_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 678341), 'created_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 279596), 'buffer_idx': 0})]\nHigh Decay Rate#\nWith a high decay rate (e.g., several 9’s), the recency score quickly goes to 0! 
If you set this all the way to 1, recency is 0 for all objects, once again making this equivalent to a vector lookup.\n# Define your embedding model\nembeddings_model = OpenAIEmbeddings()\n# Initialize the vectorstore as empty\nembedding_size = 1536\nindex = faiss.IndexFlatL2(embedding_size)\nvectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})\nretriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.999, k=1) \nyesterday = datetime.now() - timedelta(days=1)\nretriever.add_documents([Document(page_content=\"hello world\", metadata={\"last_accessed_at\": yesterday})])\nretriever.add_documents([Document(page_content=\"hello foo\")])\n['40011466-5bbe-4101-bfd1-e22e7f505de2']"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html"}}},{"rowIdx":1534,"cells":{"id":{"kind":"string","value":"fec7d90c114b-2"},"text":{"kind":"string","value":"# \"Hello Foo\" is returned first because \"hello world\" is mostly forgotten\nretriever.get_relevant_documents(\"hello world\")\n[Document(page_content='hello foo', metadata={'last_accessed_at': datetime.datetime(2023, 4, 16, 22, 9, 2, 494798), 'created_at': datetime.datetime(2023, 4, 16, 22, 9, 2, 178722), 'buffer_idx': 1})]\nVirtual Time#\nUsing some utils in LangChain, you can mock out the time component\nfrom langchain.utils import mock_now\nimport datetime\n# Notice the last access time is that date time\nwith mock_now(datetime.datetime(2011, 2, 3, 10, 11)):\n print(retriever.get_relevant_documents(\"hello world\"))\n[Document(page_content='hello world', metadata={'last_accessed_at': MockDateTime(2011, 2, 3, 10, 11), 'created_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 279596), 'buffer_idx': 0})]\nprevious\nTF-IDF\nnext\nVectorStore\n Contents\n \nLow Decay Rate\nHigh Decay Rate\nVirtual Time\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 
16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html"}}},{"rowIdx":1535,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nCohere Reranker\n Contents \nSet up the base vector store retriever\nDoing reranking with CohereRerank\nCohere Reranker#\nCohere is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\nThis notebook shows how to use Cohere’s rerank endpoint in a retriever. This builds on top of ideas in the ContextualCompressionRetriever.\n#!pip install cohere\n#!pip install faiss\n# OR (depending on Python version)\n#!pip install faiss-cpu\n# get a new token: https://dashboard.cohere.ai/\nimport os\nimport getpass\nos.environ['COHERE_API_KEY'] = getpass.getpass('Cohere API Key:')\nos.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\n# Helper function for printing docs\ndef pretty_print_docs(docs):\n print(f\"\\n{'-' * 100}\\n\".join([f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]))\nSet up the base vector store retriever#\nLet’s start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). 
We can set up the retriever to retrieve a high number (20) of docs.\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.document_loaders import TextLoader\nfrom langchain.vectorstores import FAISS\ndocuments = TextLoader('../../../state_of_the_union.txt').load()\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\ntexts = text_splitter.split_documents(documents)"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1536,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-1"},"text":{"kind":"string","value":"texts = text_splitter.split_documents(documents)\nretriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever(search_kwargs={\"k\": 20})\nquery = \"What did the president say about Ketanji Brown Jackson\"\ndocs = retriever.get_relevant_documents(query)\npretty_print_docs(docs)\nDocument 1:\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n----------------------------------------------------------------------------------------------------\nDocument 2:\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. 
From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.\n----------------------------------------------------------------------------------------------------\nDocument 3:\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n----------------------------------------------------------------------------------------------------\nDocument 4:\nHe met the Ukrainian people. \nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1537,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-2"},"text":{"kind":"string","value":"Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.\n----------------------------------------------------------------------------------------------------\nDocument 5:\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \nI’ve worked on these issues a long time. 
\nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \nSo let’s not abandon our streets. Or choose between safety and equal justice.\n----------------------------------------------------------------------------------------------------\nDocument 6:\nVice President Harris and I ran for office with a new economic vision for America. \nInvest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up \nand the middle out, not from the top down. \nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \nAmerica used to have the best roads, bridges, and airports on Earth. \nNow our infrastructure is ranked 13th in the world.\n----------------------------------------------------------------------------------------------------\nDocument 7:\nAnd tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \nBy the end of this year, the deficit will be down to less than half what it was before I took office. \nThe only president ever to cut the deficit by more than one trillion dollars in a single year. \nLowering your costs also means demanding more competition. \nI’m a capitalist, but capitalism without competition isn’t capitalism. \nIt’s exploitation—and it drives up prices."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1538,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-3"},"text":{"kind":"string","value":"It’s exploitation—and it drives up prices.\n----------------------------------------------------------------------------------------------------\nDocument 8:\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. 
\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \nVice President Harris and I ran for office with a new economic vision for America.\n----------------------------------------------------------------------------------------------------\nDocument 9:\nAll told, we created 369,000 new manufacturing jobs in America just last year. \nPowered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. \nAs Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \nIt’s time. \nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.\n----------------------------------------------------------------------------------------------------\nDocument 10:\nI’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \nAnd fourth, let’s end cancer as we know it. \nThis is personal to me and Jill, to Kamala, and to so many of you. \nCancer is the #2 cause of death in America–second only to heart disease.\n----------------------------------------------------------------------------------------------------\nDocument 11:\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \nThe pandemic has been punishing."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1539,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-4"},"text":{"kind":"string","value":"The pandemic has been punishing. 
\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \nI understand.\n----------------------------------------------------------------------------------------------------\nDocument 12:\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \nLast year COVID-19 kept us apart. This year we are finally together again. \nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \nWith a duty to one another to the American people to the Constitution. \nAnd with an unwavering resolve that freedom will always triumph over tyranny.\n----------------------------------------------------------------------------------------------------\nDocument 13:\nI know. \nOne of those soldiers was my son Major Beau Biden. \nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \nBut I’m committed to finding out everything we can. \nCommitted to military families like Danielle Robinson from Ohio. \nThe widow of Sergeant First Class Heath Robinson. \nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq.\n----------------------------------------------------------------------------------------------------\nDocument 14:\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \nFirst, beat the opioid epidemic. \nThere is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery.\n----------------------------------------------------------------------------------------------------\nDocument 15:\nThird, support our veterans. 
\nVeterans are the best of us."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1540,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-5"},"text":{"kind":"string","value":"Third, support our veterans. \nVeterans are the best of us. \nI’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. \nMy administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. \nOur troops in Iraq and Afghanistan faced many dangers.\n----------------------------------------------------------------------------------------------------\nDocument 16:\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \nAnd I know you’re tired, frustrated, and exhausted. \nBut I also know this.\n----------------------------------------------------------------------------------------------------\nDocument 17:\nNow is the hour. \nOur moment of responsibility. \nOur test of resolve and conscience, of history itself. \nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \nWell I know this nation. \nWe will meet the test. \nTo protect freedom and liberty, to expand fairness and opportunity. \nWe will save democracy. \nAs hard as these times have been, I am more optimistic about America today than I have been my whole life.\n----------------------------------------------------------------------------------------------------\nDocument 18:\nHe didn’t know how to stop fighting, and neither did she. \nThrough her pain she found purpose to demand we do better. 
\nTonight, Danielle—we are. \nThe VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \nAnd tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers.\n----------------------------------------------------------------------------------------------------\nDocument 19:"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1541,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-6"},"text":{"kind":"string","value":"----------------------------------------------------------------------------------------------------\nDocument 19:\nI understand. \nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan. \nBecause people were hurting. We needed to act, and we did. \nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis.\n----------------------------------------------------------------------------------------------------\nDocument 20:\nSo let’s not abandon our streets. Or choose between safety and equal justice. \nLet’s come together to protect our communities, restore trust, and hold law enforcement accountable. \nThat’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers.\nDoing reranking with CohereRerank#\nNow let’s wrap our base retriever with a ContextualCompressionRetriever. 
We’ll add an CohereRerank, uses the Cohere rerank endpoint to rerank the returned results.\nfrom langchain.llms import OpenAI\nfrom langchain.retrievers import ContextualCompressionRetriever\nfrom langchain.retrievers.document_compressors import CohereRerank\nllm = OpenAI(temperature=0)\ncompressor = CohereRerank()\ncompression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever)\ncompressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\npretty_print_docs(compressed_docs)\nDocument 1:\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1542,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-7"},"text":{"kind":"string","value":"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n----------------------------------------------------------------------------------------------------\nDocument 2:\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \nI’ve worked on these issues a long time. \nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \nSo let’s not abandon our streets. Or choose between safety and equal justice.\n----------------------------------------------------------------------------------------------------\nDocument 3:\nA former top litigator in private practice. A former federal public defender. 
And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\nYou can of course use this retriever within a QA pipeline\nfrom langchain.chains import RetrievalQA\nchain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), retriever=compression_retriever)\nchain({\"query\": query})\n{'query': 'What did the president say about Ketanji Brown Jackson',\n 'result': \" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she is a consensus builder who has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\"}\nprevious\nSelf-querying with Chroma\nnext\nContextual Compression\n Contents"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1543,"cells":{"id":{"kind":"string","value":"b7a9c4399cc4-8"},"text":{"kind":"string","value":"previous\nSelf-querying with Chroma\nnext\nContextual Compression\n Contents\n \nSet up the base vector store retriever\nDoing reranking with CohereRerank\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html"}}},{"rowIdx":1544,"cells":{"id":{"kind":"string","value":"fd71ef51b801-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nPubMed Retriever\nPubMed Retriever#\nThis notebook goes over how to use PubMed as a retriever\nPubMed® comprises more than 35 million citations for biomedical literature from MEDLINE, life science journals, and online books. 
Citations may include links to full text content from PubMed Central and publisher web sites.\nfrom langchain.retrievers import PubMedRetriever\nretriever = PubMedRetriever()\nretriever.get_relevant_documents(\"chatgpt\")\n[Document(page_content='', metadata={'uid': '37268021', 'title': 'Dermatology in the wake of an AI revolution: who gets a say?', 'pub_date': '2023May31'}),\n Document(page_content='', metadata={'uid': '37267643', 'title': 'What is ChatGPT and what do we do with it? Implications of the age of AI for nursing and midwifery practice and education: An editorial.', 'pub_date': '2023May30'}),\n Document(page_content='The nursing field has undergone notable changes over time and is projected to undergo further modifications in the future, owing to the advent of sophisticated technologies and growing healthcare needs. The advent of ChatGPT, an AI-powered language model, is expected to exert a significant influence on the nursing profession, specifically in the domains of patient care and instruction. 
The present article delves into the ramifications of ChatGPT within the nursing domain and accentuates its capacity and constraints to transform the discipline.', metadata={'uid': '37266721', 'title': 'The Impact of ChatGPT on the Nursing Profession: Revolutionizing Patient Care and Education.', 'pub_date': '2023Jun02'})]"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pubmed.html"}}},{"rowIdx":1545,"cells":{"id":{"kind":"string","value":"fd71ef51b801-1"},"text":{"kind":"string","value":"previous\nPinecone Hybrid Search\nnext\nSelf-querying with Qdrant\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pubmed.html"}}},{"rowIdx":1546,"cells":{"id":{"kind":"string","value":"58a9730fb62e-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nAWS Kendra\n Contents \nUsing the AWS Kendra Index Retriever\nAWS Kendra#\nAWS Kendra is an intelligent search service provided by Amazon Web Services (AWS). It utilizes advanced natural language processing (NLP) and machine learning algorithms to enable powerful search capabilities across various data sources within an organization. Kendra is designed to help users find the information they need quickly and accurately, improving productivity and decision-making.\nWith Kendra, users can search across a wide range of content types, including documents, FAQs, knowledge bases, manuals, and websites. 
It supports multiple languages and can understand complex queries, synonyms, and contextual meanings to provide highly relevant search results.\nUsing the AWS Kendra Index Retriever#\n#!pip install boto3\nimport boto3\nfrom langchain.retrievers import AwsKendraIndexRetriever\nCreate New Retriever\nkclient = boto3.client('kendra', region_name=\"us-east-1\")\nretriever = AwsKendraIndexRetriever(\n kclient=kclient,\n kendraindex=\"kendraindex\",\n)\nNow you can use retrieved documents from AWS Kendra Index\nretriever.get_relevant_documents(\"what is langchain\")\nprevious\nArxiv\nnext\nAzure Cognitive Search\n Contents\n \nUsing the AWS Kendra Index Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/aws_kendra_index_retriever.html"}}},{"rowIdx":1547,"cells":{"id":{"kind":"string","value":"2b8d91c15960-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nSVM\n Contents \nCreate New Retriever with Texts\nUse Retriever\nSVM#\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.\nThis notebook goes over how to use a retriever that under the hood uses an SVM using scikit-learn package.\nLargely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb\n#!pip install scikit-learn\n#!pip install lark\nWe want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\nimport os\nimport getpass\nos.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\nfrom langchain.retrievers import SVMRetriever\nfrom langchain.embeddings import OpenAIEmbeddings\nCreate New Retriever with Texts#\nretriever = SVMRetriever.from_texts([\"foo\", \"bar\", \"world\", \"hello\", \"foo bar\"], OpenAIEmbeddings())\nUse Retriever#\nWe can now use the retriever!\nresult = 
retriever.get_relevant_documents(\"foo\")\nresult\n[Document(page_content='foo', metadata={}),\n Document(page_content='foo bar', metadata={}),\n Document(page_content='hello', metadata={}),\n Document(page_content='world', metadata={})]\nprevious\nSelf-querying\nnext\nTF-IDF\n Contents\n \nCreate New Retriever with Texts\nUse Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/svm.html"}}},{"rowIdx":1548,"cells":{"id":{"kind":"string","value":"6fdfbdafed2d-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nkNN\n Contents \nCreate New Retriever with Texts\nUse Retriever\nkNN#\nIn statistics, the k-nearest neighbors algorithm (k-NN) is a non-parametric supervised learning method first developed by Evelyn Fix and Joseph Hodges in 1951, and later expanded by Thomas Cover. It is used for classification and regression.\nThis notebook goes over how to use a retriever that under the hood uses an kNN.\nLargely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb\nfrom langchain.retrievers import KNNRetriever\nfrom langchain.embeddings import OpenAIEmbeddings\nCreate New Retriever with Texts#\nretriever = KNNRetriever.from_texts([\"foo\", \"bar\", \"world\", \"hello\", \"foo bar\"], OpenAIEmbeddings())\nUse Retriever#\nWe can now use the retriever!\nresult = retriever.get_relevant_documents(\"foo\")\nresult\n[Document(page_content='foo', metadata={}),\n Document(page_content='foo bar', metadata={}),\n Document(page_content='hello', metadata={}),\n Document(page_content='bar', metadata={})]\nprevious\nElasticSearch BM25\nnext\nLOTR (Merger Retriever)\n Contents\n \nCreate New Retriever with Texts\nUse Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 
2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/knn.html"}}},{"rowIdx":1549,"cells":{"id":{"kind":"string","value":"7ebc8806f552-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nAzure Cognitive Search\n Contents \nSet up Azure Cognitive Search\nUsing the Azure Cognitive Search Retriever\nAzure Cognitive Search#\nAzure Cognitive Search (formerly known as Azure Search) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.\nSearch is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you’ll work with the following capabilities:\nA search engine for full text search over a search index containing user-owned content\nRich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation\nRich query syntax for text search, fuzzy search, autocomplete, geo-search and more\nProgrammability through REST APIs and client libraries in Azure SDKs\nAzure integration at the data layer, machine learning layer, and AI (Cognitive Services)\nThis notebook shows how to use Azure Cognitive Search (ACS) within LangChain.\nSet up Azure Cognitive Search#\nTo set up ACS, please follow the instrcutions here.\nPlease note\nthe name of your ACS service,\nthe name of your ACS index,\nyour API key.\nYour API key can be either Admin or Query key, but as we only read data it is recommended to use a Query key.\nUsing the Azure Cognitive Search Retriever#\nimport os\nfrom langchain.retrievers import AzureCognitiveSearchRetriever\nSet Service Name, Index Name and API key as environment variables (alternatively, you can pass them as arguments to 
AzureCognitiveSearchRetriever).\nos.environ[\"AZURE_COGNITIVE_SEARCH_SERVICE_NAME\"] = \"\"\nos.environ[\"AZURE_COGNITIVE_SEARCH_INDEX_NAME\"] =\"\""},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/azure_cognitive_search.html"}}},{"rowIdx":1550,"cells":{"id":{"kind":"string","value":"7ebc8806f552-1"},"text":{"kind":"string","value":"os.environ[\"AZURE_COGNITIVE_SEARCH_API_KEY\"] = \"\"\nCreate the Retriever\nretriever = AzureCognitiveSearchRetriever(content_key=\"content\")\nNow you can use retrieve documents from Azure Cognitive Search\nretriever.get_relevant_documents(\"what is langchain\")\nprevious\nAWS Kendra\nnext\nChatGPT Plugin\n Contents\n \nSet up Azure Cognitive Search\nUsing the Azure Cognitive Search Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/azure_cognitive_search.html"}}},{"rowIdx":1551,"cells":{"id":{"kind":"string","value":"8910a72533a0-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nTF-IDF\n Contents \nCreate New Retriever with Texts\nCreate a New Retriever with Documents\nUse Retriever\nTF-IDF#\nTF-IDF means term-frequency times inverse document-frequency.\nThis notebook goes over how to use a retriever that under the hood uses TF-IDF using scikit-learn package.\nFor more information on the details of TF-IDF see this blog post.\n# !pip install scikit-learn\nfrom langchain.retrievers import TFIDFRetriever\nCreate New Retriever with Texts#\nretriever = TFIDFRetriever.from_texts([\"foo\", \"bar\", \"world\", \"hello\", \"foo bar\"])\nCreate a New Retriever with Documents#\nYou can now create a new retriever with the documents you created.\nfrom langchain.schema import Document\nretriever = TFIDFRetriever.from_documents([Document(page_content=\"foo\"), Document(page_content=\"bar\"), 
Document(page_content=\"world\"), Document(page_content=\"hello\"), Document(page_content=\"foo bar\")])\nUse Retriever#\nWe can now use the retriever!\nresult = retriever.get_relevant_documents(\"foo\")\nresult\n[Document(page_content='foo', metadata={}),\n Document(page_content='foo bar', metadata={}),\n Document(page_content='hello', metadata={}),\n Document(page_content='world', metadata={})]\nprevious\nSVM\nnext\nTime Weighted VectorStore\n Contents\n \nCreate New Retriever with Texts\nCreate a New Retriever with Documents\nUse Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/tf_idf.html"}}},{"rowIdx":1552,"cells":{"id":{"kind":"string","value":"9221c2b71a88-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nVespa\nVespa#\nVespa is a fully featured search engine and vector database. It supports vector search (ANN), lexical search, and search in structured data, all in the same query.\nThis notebook shows how to use Vespa.ai as a LangChain retriever.\nIn order to create a retriever, we use pyvespa to\ncreate a connection a Vespa service.\n#!pip install pyvespa\nfrom vespa.application import Vespa\nvespa_app = Vespa(url=\"https://doc-search.vespa.oath.cloud\")\nThis creates a connection to a Vespa service, here the Vespa documentation search service.\nUsing pyvespa package, you can also connect to a\nVespa Cloud instance\nor a local\nDocker instance.\nAfter connecting to the service, you can set up the retriever:\nfrom langchain.retrievers.vespa_retriever import VespaRetriever\nvespa_query_body = {\n \"yql\": \"select content from paragraph where userQuery()\",\n \"hits\": 5,\n \"ranking\": \"documentation\",\n \"locale\": \"en-us\"\n}\nvespa_content_field = \"content\"\nretriever = VespaRetriever(vespa_app, vespa_query_body, vespa_content_field)\nThis sets up a LangChain retriever that 
fetches documents from the Vespa application.\nHere, up to 5 results are retrieved from the content field in the paragraph document type,\nusing doumentation as the ranking method. The userQuery() is replaced with the actual query\npassed from LangChain.\nPlease refer to the pyvespa documentation\nfor more information.\nNow you can return the results and continue using the results in LangChain.\nretriever.get_relevant_documents(\"what is vespa?\")"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vespa.html"}}},{"rowIdx":1553,"cells":{"id":{"kind":"string","value":"9221c2b71a88-1"},"text":{"kind":"string","value":"retriever.get_relevant_documents(\"what is vespa?\")\nprevious\nVectorStore\nnext\nWeaviate Hybrid Search\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vespa.html"}}},{"rowIdx":1554,"cells":{"id":{"kind":"string","value":"440a546753af-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nZep\n Contents \nRetriever Example\nInitialize the Zep Chat Message History Class and add a chat message history to the memory store\nUse the Zep Retriever to vector search over the Zep memory\nZep#\nZep - A long-term memory store for LLM applications.\nMore on Zep:\nZep stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs.\nKey Features:\nLong-term memory persistence, with access to historical messages irrespective of your summarization strategy.\nAuto-summarization of memory messages based on a configurable message window. 
A series of summaries are stored, providing flexibility for future summarization strategies.\nVector search over memories, with messages automatically embedded on creation.\nAuto-token counting of memories and summaries, allowing finer-grained control over prompt assembly.\nPython and JavaScript SDKs.\nZep’s Go Extractor model is easily extensible, with a simple, clean interface available to build new enrichment functionality, such as summarizers, entity extractors, embedders, and more.\nZep project: getzep/zep\nRetriever Example#\nThis notebook demonstrates how to search historical chat message histories using the Zep Long-term Memory Store.\nWe’ll demonstrate:\nAdding conversation history to the Zep memory store.\nVector search over the conversation history.\nfrom langchain.memory.chat_message_histories import ZepChatMessageHistory\nfrom langchain.schema import HumanMessage, AIMessage\nfrom uuid import uuid4\n# Set this to your Zep server URL\nZEP_API_URL = \"http://localhost:8000\"\nInitialize the Zep Chat Message History Class and add a chat message history to the memory store#"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1555,"cells":{"id":{"kind":"string","value":"440a546753af-1"},"text":{"kind":"string","value":"Initialize the Zep Chat Message History Class and add a chat message history to the memory store#\nNOTE: Unlike other Retrievers, the content returned by the Zep Retriever is session/user specific. A session_id is required when instantiating the Retriever.\nsession_id = str(uuid4()) # This is a unique identifier for the user/session\n# Set up Zep Chat History. We'll use this to add chat histories to the memory store\nzep_chat_history = ZepChatMessageHistory(\n session_id=session_id,\n url=ZEP_API_URL,\n)\n# Preload some messages into the memory. The default message window is 12 messages. 
We want to push beyond this to demonstrate auto-summarization.\ntest_history = [\n {\"role\": \"human\", \"content\": \"Who was Octavia Butler?\"},\n {\n \"role\": \"ai\",\n \"content\": (\n \"Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American\"\n \" science fiction author.\"\n ),\n },\n {\"role\": \"human\", \"content\": \"Which books of hers were made into movies?\"},\n {\n \"role\": \"ai\",\n \"content\": (\n \"The most well-known adaptation of Octavia Butler's work is the FX series\"\n \" Kindred, based on her novel of the same name.\"\n ),\n },\n {\"role\": \"human\", \"content\": \"Who were her contemporaries?\"},\n {\n \"role\": \"ai\",\n \"content\": (\n \"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R.\"\n \" Delany, and Joanna Russ.\"\n ),\n },"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1556,"cells":{"id":{"kind":"string","value":"440a546753af-2"},"text":{"kind":"string","value":"\" Delany, and Joanna Russ.\"\n ),\n },\n {\"role\": \"human\", \"content\": \"What awards did she win?\"},\n {\n \"role\": \"ai\",\n \"content\": (\n \"Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur\"\n \" Fellowship.\"\n ),\n },\n {\n \"role\": \"human\",\n \"content\": \"Which other women sci-fi writers might I want to read?\",\n },\n {\n \"role\": \"ai\",\n \"content\": \"You might want to read Ursula K. Le Guin or Joanna Russ.\",\n },\n {\n \"role\": \"human\",\n \"content\": (\n \"Write a short synopsis of Butler's book, Parable of the Sower. What is it\"\n \" about?\"\n ),\n },\n {\n \"role\": \"ai\",\n \"content\": (\n \"Parable of the Sower is a science fiction novel by Octavia Butler,\"\n \" published in 1993. 
It follows the story of Lauren Olamina, a young woman\"\n \" living in a dystopian future where society has collapsed due to\"\n \" environmental disasters, poverty, and violence.\"\n ),\n },\n]\nfor msg in test_history:\n zep_chat_history.append(\n HumanMessage(content=msg[\"content\"])\n if msg[\"role\"] == \"human\"\n else AIMessage(content=msg[\"content\"])\n )\nUse the Zep Retriever to vector search over the Zep memory#\nZep provides native vector search over historical conversation memory. Embedding happens automatically."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1557,"cells":{"id":{"kind":"string","value":"440a546753af-3"},"text":{"kind":"string","value":"Zep provides native vector search over historical conversation memory. Embedding happens automatically.\nNOTE: Embedding of messages occurs asynchronously, so the first query may not return results. Subsequent queries will return results as the embeddings are generated.\nfrom langchain.retrievers import ZepRetriever\nzep_retriever = ZepRetriever(\n session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n url=ZEP_API_URL,\n top_k=5,\n)\nawait zep_retriever.aget_relevant_documents(\"Who wrote Parable of the Sower?\")\n[Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759001673780126, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. 
Delany, and Joanna Russ.\", metadata={'score': 0.7602262941130749, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1558,"cells":{"id":{"kind":"string","value":"440a546753af-4"},"text":{"kind":"string","value":"Document(page_content='Who were her contemporaries?', metadata={'score': 0.757553366415519, 'uuid': '41f9c41a-a205-41e1-b48b-a0a4cd943fc8', 'created_at': '2023-05-25T15:03:30.243995Z', 'role': 'human', 'token_count': 8}),\n Document(page_content='Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American science fiction author.', metadata={'score': 0.7546211059317948, 'uuid': '34678311-0098-4f1a-8fd4-5615ac692deb', 'created_at': '2023-05-25T15:03:30.231427Z', 'role': 'ai', 'token_count': 31}),\n Document(page_content='Which books of hers were made into movies?', metadata={'score': 0.7496714959247069, 'uuid': '18046c3a-9666-4d3e-b4f0-43d1394732b7', 'created_at': '2023-05-25T15:03:30.236837Z', 'role': 'human', 'token_count': 11})]\nWe can also use the Zep sync API to retrieve results:\nzep_retriever.get_relevant_documents(\"Who wrote Parable of the Sower?\")"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1559,"cells":{"id":{"kind":"string","value":"440a546753af-5"},"text":{"kind":"string","value":"[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. 
It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897321402776546, 'uuid': '1c09603a-52c1-40d7-9d69-29f26256029c', 'created_at': '2023-05-25T15:03:30.268257Z', 'role': 'ai', 'token_count': 56}),\n Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8857628682610436, 'uuid': 'f6706e8c-6c91-452f-8c1b-9559fd924657', 'created_at': '2023-05-25T15:03:30.265302Z', 'role': 'human', 'token_count': 23}),\n Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759670375149477, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1560,"cells":{"id":{"kind":"string","value":"440a546753af-6"},"text":{"kind":"string","value":"Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602854653476563, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n Document(page_content='You might want to read Ursula K. 
Le Guin or Joanna Russ.', metadata={'score': 0.7595293992240313, 'uuid': 'f22f2498-6118-4c74-8718-aa89ccd7e3d6', 'created_at': '2023-05-25T15:03:30.261198Z', 'role': 'ai', 'token_count': 18})]\nprevious\nWikipedia\nnext\nChains\n Contents\n \nRetriever Example\nInitialize the Zep Chat Message History Class and add a chat message history to the memory store\nUse the Zep Retriever to vector search over the Zep memory\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html"}}},{"rowIdx":1561,"cells":{"id":{"kind":"string","value":"15c22005e0bd-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nSelf-querying with Qdrant\n Contents \nCreating a Qdrant vectorstore\nCreating our self-querying retriever\nTesting it out\nFilter k\nSelf-querying with Qdrant#\nQdrant (read: quadrant ) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. Qdrant is tailored to extended filtering support. It makes it useful\nIn the notebook we’ll demo the SelfQueryRetriever wrapped around a Qdrant vector store.\nCreating a Qdrant vectorstore#\nFirst we’ll want to create a Chroma VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies.\nNOTE: The self-query retriever requires you to have lark installed (pip install lark). 
We also need the qdrant-client package.\n#!pip install lark qdrant-client\nWe want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n# import os\n# import getpass\n# os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\nfrom langchain.schema import Document\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import Qdrant\nembeddings = OpenAIEmbeddings()\ndocs = [\n Document(page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\", metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"}),\n Document(page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\", metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1562,"cells":{"id":{"kind":"string","value":"15c22005e0bd-1"},"text":{"kind":"string","value":"Document(page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\", metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6}),\n Document(page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\", metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3}),\n Document(page_content=\"Toys come alive and have a blast doing so\", metadata={\"year\": 1995, \"genre\": \"animated\"}),\n Document(page_content=\"Three men walk into the Zone, three men walk out of the Zone\", metadata={\"year\": 1979, \"rating\": 9.9, \"director\": \"Andrei Tarkovsky\", \"genre\": \"science fiction\", \"rating\": 9.9})\n]\nvectorstore = Qdrant.from_documents(\n docs, \n embeddings, \n location=\":memory:\", # Local mode with in-memory storage only\n collection_name=\"my_documents\",\n)\nCreating our self-querying 
retriever#\nNow we can instantiate our retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.\nfrom langchain.llms import OpenAI\nfrom langchain.retrievers.self_query.base import SelfQueryRetriever\nfrom langchain.chains.query_constructor.base import AttributeInfo\nmetadata_field_info=[\n AttributeInfo(\n name=\"genre\",\n description=\"The genre of the movie\", \n type=\"string or list[string]\", \n ),\n AttributeInfo(\n name=\"year\",\n description=\"The year the movie was released\", \n type=\"integer\", \n ),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1563,"cells":{"id":{"kind":"string","value":"15c22005e0bd-2"},"text":{"kind":"string","value":"type=\"integer\", \n ),\n AttributeInfo(\n name=\"director\",\n description=\"The name of the movie director\", \n type=\"string\", \n ),\n AttributeInfo(\n name=\"rating\",\n description=\"A 1-10 rating for the movie\",\n type=\"float\"\n ),\n]\ndocument_content_description = \"Brief summary of a movie\"\nllm = OpenAI(temperature=0)\nretriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True)\nTesting it out#\nAnd now we can try actually using our retriever!\n# This example only specifies a relevant query\nretriever.get_relevant_documents(\"What are some movies about dinosaurs\")\nquery='dinosaur' filter=None limit=None\n[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}),\n Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 
'Andrei Tarkovsky', 'genre': 'science fiction'}),\n Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6})]\n# This example only specifies a filter\nretriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1564,"cells":{"id":{"kind":"string","value":"15c22005e0bd-3"},"text":{"kind":"string","value":"query=' ' filter=Comparison(comparator=, attribute='rating', value=8.5) limit=None\n[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'}),\n Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6})]\n# This example specifies a query and a filter\nretriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")\nquery='women' filter=Comparison(comparator=, attribute='director', value='Greta Gerwig') limit=None\n[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3})]\n# This example specifies a composite filter\nretriever.get_relevant_documents(\"What's a highly rated (above 8.5) science fiction film?\")\nquery=' ' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='rating', value=8.5), Comparison(comparator=, attribute='genre', value='science fiction')]) 
limit=None"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1565,"cells":{"id":{"kind":"string","value":"15c22005e0bd-4"},"text":{"kind":"string","value":"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})]\n# This example specifies a query and composite filter\nretriever.get_relevant_documents(\"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\")\nquery='toys' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='year', value=1990), Comparison(comparator=, attribute='year', value=2005), Comparison(comparator=, attribute='genre', value='animated')]) limit=None\n[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]\nFilter k#\nWe can also use the self query retriever to specify k: the number of documents to fetch.\nWe can do this by passing enable_limit=True to the constructor.\nretriever = SelfQueryRetriever.from_llm(\n llm, \n vectorstore, \n document_content_description, \n metadata_field_info, \n enable_limit=True,\n verbose=True\n)\n# This example only specifies a relevant query\nretriever.get_relevant_documents(\"what are two movies about dinosaurs\")\nquery='dinosaur' filter=None limit=2\n[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1566,"cells":{"id":{"kind":"string","value":"15c22005e0bd-5"},"text":{"kind":"string","value":"Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 
'animated'})]\nprevious\nPubMed Retriever\nnext\nSelf-querying\n Contents\n \nCreating a Qdrant vectorstore\nCreating our self-querying retriever\nTesting it out\nFilter k\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html"}}},{"rowIdx":1567,"cells":{"id":{"kind":"string","value":"7751380b00c9-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nChatGPT Plugin\n Contents \nUsing the ChatGPT Retriever Plugin\nChatGPT Plugin#\nOpenAI plugins connect ChatGPT to third-party applications. These plugins enable ChatGPT to interact with APIs defined by developers, enhancing ChatGPT’s capabilities and allowing it to perform a wide range of actions.\nPlugins can allow ChatGPT to do things like:\nRetrieve real-time information; e.g., sports scores, stock prices, the latest news, etc.\nRetrieve knowledge-base information; e.g., company docs, personal notes, etc.\nPerform actions on behalf of the user; e.g., booking a flight, ordering food, etc.\nThis notebook shows how to use the ChatGPT Retriever Plugin within LangChain.\n# STEP 1: Load\n# Load documents using LangChain's DocumentLoaders\n# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\nfrom langchain.document_loaders.csv_loader import CSVLoader\nloader = CSVLoader(file_path='../../document_loaders/examples/example_data/mlb_teams_2012.csv')\ndata = loader.load()\n# STEP 2: Convert\n# Convert Document to format expected by https://github.com/openai/chatgpt-retrieval-plugin\nfrom typing import List\nfrom langchain.docstore.document import Document\nimport json\ndef write_json(path: str, documents: List[Document])-> None:\n results = [{\"text\": doc.page_content} for doc in documents]\n with open(path, \"w\") as f:\n json.dump(results, f, indent=2)\nwrite_json(\"foo.json\", data)\n# STEP 3: 
Use\n# Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html"}}},{"rowIdx":1568,"cells":{"id":{"kind":"string","value":"7751380b00c9-1"},"text":{"kind":"string","value":"Using the ChatGPT Retriever Plugin#\nOkay, so we’ve created the ChatGPT Retriever Plugin, but how do we actually use it?\nThe below code walks through how to do that.\nWe want to use ChatGPTPluginRetriever so we have to get the OpenAI API Key.\nimport os\nimport getpass\nos.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\nfrom langchain.retrievers import ChatGPTPluginRetriever\nretriever = ChatGPTPluginRetriever(url=\"http://0.0.0.0:8000\", bearer_token=\"foo\")\nretriever.get_relevant_documents(\"alice's phone number\")\n[Document(page_content=\"This is Alice's phone number: 123-456-7890\", lookup_str='', metadata={'id': '456_0', 'metadata': {'source': 'email', 'source_id': '567', 'url': None, 'created_at': '1609592400.0', 'author': 'Alice', 'document_id': '456'}, 'embedding': None, 'score': 0.925571561}, lookup_index=0),\n Document(page_content='This is a document about something', lookup_str='', metadata={'id': '123_0', 'metadata': {'source': 'file', 'source_id': 'https://example.com/doc1', 'url': 'https://example.com/doc1', 'created_at': '1609502400.0', 'author': 'Alice', 'document_id': '123'}, 'embedding': None, 'score': 0.6987589}, lookup_index=0),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html"}}},{"rowIdx":1569,"cells":{"id":{"kind":"string","value":"7751380b00c9-2"},"text":{"kind":"string","value":"Document(page_content='Team: Angels \"Payroll (millions)\": 154.49 \"Wins\": 89', lookup_str='', metadata={'id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631_0', 'metadata': 
{'source': None, 'source_id': None, 'url': None, 'created_at': None, 'author': None, 'document_id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631'}, 'embedding': None, 'score': 0.697888613}, lookup_index=0)]\nprevious\nAzure Cognitive Search\nnext\nSelf-querying with Chroma\n Contents\n \nUsing the ChatGPT Retriever Plugin\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html"}}},{"rowIdx":1570,"cells":{"id":{"kind":"string","value":"989b78f99164-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nContextual Compression\n Contents \nContextual Compression\nUsing a vanilla vector store retriever\nAdding contextual compression with an LLMChainExtractor\nMore built-in compressors: filters\nLLMChainFilter\nEmbeddingsFilter\nStringing compressors and document transformers together\nContextual Compression#\nThis notebook introduces the concept of DocumentCompressors and the ContextualCompressionRetriever. The core idea is simple: given a specific query, we should be able to return only the documents relevant to that query, and only the parts of those documents that are relevant. The ContextualCompressionsRetriever is a wrapper for another retriever that iterates over the initial output of the base retriever and filters and compresses those initial documents, so that only the most relevant information is returned.\n# Helper function for printing docs\ndef pretty_print_docs(docs):\n print(f\"\\n{'-' * 100}\\n\".join([f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]))\nUsing a vanilla vector store retriever#\nLet’s start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can see that given an example question our retriever returns one or two relevant docs and a few irrelevant docs. 
And even the relevant docs have a lot of irrelevant information in them.\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.document_loaders import TextLoader\nfrom langchain.vectorstores import FAISS\ndocuments = TextLoader('../../../state_of_the_union.txt').load()\ntext_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\ntexts = text_splitter.split_documents(documents)"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1571,"cells":{"id":{"kind":"string","value":"989b78f99164-1"},"text":{"kind":"string","value":"texts = text_splitter.split_documents(documents)\nretriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()\ndocs = retriever.get_relevant_documents(\"What did the president say about Ketanji Brown Jackson\")\npretty_print_docs(docs)\nDocument 1:\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n----------------------------------------------------------------------------------------------------\nDocument 2:\nA former top litigator in private practice. A former federal public defender. 
And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1572,"cells":{"id":{"kind":"string","value":"989b78f99164-2"},"text":{"kind":"string","value":"We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n----------------------------------------------------------------------------------------------------\nDocument 3:\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. 
It is important for us to show the nation that we can come together and do big things. \nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \nFirst, beat the opioid epidemic.\n----------------------------------------------------------------------------------------------------\nDocument 4:\nTonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \nThat ends on my watch. \nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1573,"cells":{"id":{"kind":"string","value":"989b78f99164-3"},"text":{"kind":"string","value":"Let’s pass the Paycheck Fairness Act and paid leave. \nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.\nAdding contextual compression with an LLMChainExtractor#\nNow let’s wrap our base retriever with a ContextualCompressionRetriever. 
We’ll add an LLMChainExtractor, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query.\nfrom langchain.llms import OpenAI\nfrom langchain.retrievers import ContextualCompressionRetriever\nfrom langchain.retrievers.document_compressors import LLMChainExtractor\nllm = OpenAI(temperature=0)\ncompressor = LLMChainExtractor.from_llm(llm)\ncompression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever)\ncompressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\npretty_print_docs(compressed_docs)\nDocument 1:\n\"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\"\n----------------------------------------------------------------------------------------------------\nDocument 2:"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1574,"cells":{"id":{"kind":"string","value":"989b78f99164-4"},"text":{"kind":"string","value":"----------------------------------------------------------------------------------------------------\nDocument 2:\n\"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. 
Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\"\nMore built-in compressors: filters#\nLLMChainFilter#\nThe LLMChainFilter is slightly simpler but more robust compressor that uses an LLM chain to decide which of the initially retrieved documents to filter out and which ones to return, without manipulating the document contents.\nfrom langchain.retrievers.document_compressors import LLMChainFilter\n_filter = LLMChainFilter.from_llm(llm)\ncompression_retriever = ContextualCompressionRetriever(base_compressor=_filter, base_retriever=retriever)\ncompressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\npretty_print_docs(compressed_docs)\nDocument 1:\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\nEmbeddingsFilter#"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1575,"cells":{"id":{"kind":"string","value":"989b78f99164-5"},"text":{"kind":"string","value":"EmbeddingsFilter#\nMaking an extra LLM call over each retrieved document is expensive and slow. 
The EmbeddingsFilter provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query.\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.retrievers.document_compressors import EmbeddingsFilter\nembeddings = OpenAIEmbeddings()\nembeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\ncompression_retriever = ContextualCompressionRetriever(base_compressor=embeddings_filter, base_retriever=retriever)\ncompressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\npretty_print_docs(compressed_docs)\nDocument 1:\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n----------------------------------------------------------------------------------------------------\nDocument 2:"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1576,"cells":{"id":{"kind":"string","value":"989b78f99164-6"},"text":{"kind":"string","value":"----------------------------------------------------------------------------------------------------\nDocument 2:\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n----------------------------------------------------------------------------------------------------\nDocument 3:\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. 
\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \nFirst, beat the opioid epidemic.\nStringing compressors and document transformers together#"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1577,"cells":{"id":{"kind":"string","value":"989b78f99164-7"},"text":{"kind":"string","value":"First, beat the opioid epidemic.\nStringing compressors and document transformers together#\nUsing the DocumentCompressorPipeline we can also easily combine multiple compressors in sequence. Along with compressors we can add BaseDocumentTransformers to our pipeline, which don’t perform any contextual compression but simply perform some transformation on a set of documents. For example TextSplitters can be used as document transformers to split documents into smaller pieces, and the EmbeddingsRedundantFilter can be used to filter out redundant documents based on embedding similarity between documents.\nBelow we create a compressor pipeline by first splitting our docs into smaller chunks, then removing redundant documents, and then filtering based on relevance to the query.\nfrom langchain.document_transformers import EmbeddingsRedundantFilter\nfrom langchain.retrievers.document_compressors import DocumentCompressorPipeline\nfrom langchain.text_splitter import CharacterTextSplitter\nsplitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=\". 
\")\nredundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)\nrelevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\npipeline_compressor = DocumentCompressorPipeline(\n transformers=[splitter, redundant_filter, relevant_filter]\n)\ncompression_retriever = ContextualCompressionRetriever(base_compressor=pipeline_compressor, base_retriever=retriever)\ncompressed_docs = compression_retriever.get_relevant_documents(\"What did the president say about Ketanji Jackson Brown\")\npretty_print_docs(compressed_docs)\nDocument 1:\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson\n----------------------------------------------------------------------------------------------------\nDocument 2:"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1578,"cells":{"id":{"kind":"string","value":"989b78f99164-8"},"text":{"kind":"string","value":"----------------------------------------------------------------------------------------------------\nDocument 2:\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year\n----------------------------------------------------------------------------------------------------\nDocument 3:\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. 
A consensus builder\nprevious\nCohere Reranker\nnext\nDataberry\n Contents\n \nContextual Compression\nUsing a vanilla vector store retriever\nAdding contextual compression with an LLMChainExtractor\nMore built-in compressors: filters\nLLMChainFilter\nEmbeddingsFilter\nStringing compressors and document transformers together\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html"}}},{"rowIdx":1579,"cells":{"id":{"kind":"string","value":"d7797d46d7af-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nPinecone Hybrid Search\n Contents \nSetup Pinecone\nGet embeddings and sparse encoders\nLoad Retriever\nAdd texts (if necessary)\nUse Retriever\nPinecone Hybrid Search#\nPinecone is a vector database with broad functionality.\nThis notebook goes over how to use a retriever that under the hood uses Pinecone and Hybrid Search.\nThe logic of this retriever is taken from this documentaion\nTo use Pinecone, you must have an API key and an Environment.\nHere are the installation instructions.\n#!pip install pinecone-client pinecone-text\nimport os\nimport getpass\nos.environ['PINECONE_API_KEY'] = getpass.getpass('Pinecone API Key:')\nfrom langchain.retrievers import PineconeHybridSearchRetriever\nos.environ['PINECONE_ENVIRONMENT'] = getpass.getpass('Pinecone Environment:')\nWe want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\nos.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\nSetup Pinecone#\nYou should only have to do this part once.\nNote: it’s important to make sure that the “context” field that holds the document text in the metadata is not indexed. Currently you need to specify explicitly the fields you do want to index. 
For more information checkout Pinecone’s docs.\nimport os\nimport pinecone\napi_key = os.getenv(\"PINECONE_API_KEY\") or \"PINECONE_API_KEY\"\n# find environment next to your API key in the Pinecone console\nenv = os.getenv(\"PINECONE_ENVIRONMENT\") or \"PINECONE_ENVIRONMENT\"\nindex_name = \"langchain-pinecone-hybrid-search\"\npinecone.init(api_key=api_key, enviroment=env)"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html"}}},{"rowIdx":1580,"cells":{"id":{"kind":"string","value":"d7797d46d7af-1"},"text":{"kind":"string","value":"pinecone.init(api_key=api_key, enviroment=env)\npinecone.whoami()\nWhoAmIResponse(username='load', user_label='label', projectname='load-test')\n # create the index\npinecone.create_index(\n name = index_name,\n dimension = 1536, # dimensionality of dense model\n metric = \"dotproduct\", # sparse values supported only for dotproduct\n pod_type = \"s1\",\n metadata_config={\"indexed\": []} # see explaination above\n)\nNow that its created, we can use it\nindex = pinecone.Index(index_name)\nGet embeddings and sparse encoders#\nEmbeddings are used for the dense vectors, tokenizer is used for the sparse vector\nfrom langchain.embeddings import OpenAIEmbeddings\nembeddings = OpenAIEmbeddings()\nTo encode the text to sparse values you can either choose SPLADE or BM25. For out of domain tasks we recommend using BM25.\nFor more information about the sparse encoders you can checkout pinecone-text library docs.\nfrom pinecone_text.sparse import BM25Encoder\n# or from pinecone_text.sparse import SpladeEncoder if you wish to work with SPLADE\n# use default tf-idf values\nbm25_encoder = BM25Encoder().default()\nThe above code is using default tfids values. It’s highly recommended to fit the tf-idf values to your own corpus. 
You can do it as follow:\ncorpus = [\"foo\", \"bar\", \"world\", \"hello\"]\n# fit tf-idf values on your corpus\nbm25_encoder.fit(corpus)\n# store the values to a json file\nbm25_encoder.dump(\"bm25_values.json\")\n# load to your BM25Encoder object\nbm25_encoder = BM25Encoder().load(\"bm25_values.json\")\nLoad Retriever#"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html"}}},{"rowIdx":1581,"cells":{"id":{"kind":"string","value":"d7797d46d7af-2"},"text":{"kind":"string","value":"Load Retriever#\nWe can now construct the retriever!\nretriever = PineconeHybridSearchRetriever(embeddings=embeddings, sparse_encoder=bm25_encoder, index=index)\nAdd texts (if necessary)#\nWe can optionally add texts to the retriever (if they aren’t already in there)\nretriever.add_texts([\"foo\", \"bar\", \"world\", \"hello\"])\n100%|██████████| 1/1 [00:02<00:00, 2.27s/it]\nUse Retriever#\nWe can now use the retriever!\nresult = retriever.get_relevant_documents(\"foo\")\nresult[0]\nDocument(page_content='foo', metadata={})\nprevious\nMetal\nnext\nPubMed Retriever\n Contents\n \nSetup Pinecone\nGet embeddings and sparse encoders\nLoad Retriever\nAdd texts (if necessary)\nUse Retriever\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html"}}},{"rowIdx":1582,"cells":{"id":{"kind":"string","value":"214c45ef31c4-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nVectorStore\n Contents \nMaximum Marginal Relevance Retrieval\nSimilarity Score Threshold Retrieval\nSpecifying top k\nVectorStore#\nThe index - and therefore the retriever - that LangChain has the most support for is the VectorStoreRetriever. 
As the name suggests, this retriever is backed heavily by a VectorStore.\nOnce you construct a VectorStore, its very easy to construct a retriever. Let’s walk through an example.\nfrom langchain.document_loaders import TextLoader\nloader = TextLoader('../../../state_of_the_union.txt')\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import FAISS\nfrom langchain.embeddings import OpenAIEmbeddings\ndocuments = loader.load()\ntext_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\ntexts = text_splitter.split_documents(documents)\nembeddings = OpenAIEmbeddings()\ndb = FAISS.from_documents(texts, embeddings)\nExiting: Cleaning up .chroma directory\nretriever = db.as_retriever()\ndocs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\")\nMaximum Marginal Relevance Retrieval#\nBy default, the vectorstore retriever uses similarity search. If the underlying vectorstore support maximum marginal relevance search, you can specify that as the search type.\nretriever = db.as_retriever(search_type=\"mmr\")\ndocs = retriever.get_relevant_documents(\"what did he say abotu ketanji brown jackson\")\nSimilarity Score Threshold Retrieval#\nYou can also use a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold\nretriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5})"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vectorstore.html"}}},{"rowIdx":1583,"cells":{"id":{"kind":"string","value":"214c45ef31c4-1"},"text":{"kind":"string","value":"docs = retriever.get_relevant_documents(\"what did he say abotu ketanji brown jackson\")\nSpecifying top k#\nYou can also specify search kwargs like k to use when doing retrieval.\nretriever = db.as_retriever(search_kwargs={\"k\": 1})\ndocs = 
retriever.get_relevant_documents(\"what did he say abotu ketanji brown jackson\")\nlen(docs)\n1\nprevious\nTime Weighted VectorStore\nnext\nVespa\n Contents\n \nMaximum Marginal Relevance Retrieval\nSimilarity Score Threshold Retrieval\nSpecifying top k\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vectorstore.html"}}},{"rowIdx":1584,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nSelf-querying\n Contents \nCreating a Pinecone index\nCreating our self-querying retriever\nTesting it out\nFilter k\nSelf-querying#\nIn the notebook we’ll demo the SelfQueryRetriever, which, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to it’s underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documented, but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\nCreating a Pinecone index#\nFirst we’ll want to create a Pinecone VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies.\nTo use Pinecone, you to have pinecone package installed and you must have an API key and an Environment. 
Here are the installation instructions.\nNOTE: The self-query retriever requires you to have lark package installed.\n# !pip install lark\n#!pip install pinecone-client\nimport os\nimport pinecone\npinecone.init(api_key=os.environ[\"PINECONE_API_KEY\"], environment=os.environ[\"PINECONE_ENV\"])\n/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pinecone/index.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n from tqdm.autonotebook import tqdm\nfrom langchain.schema import Document\nfrom langchain.embeddings.openai import OpenAIEmbeddings"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1585,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-1"},"text":{"kind":"string","value":"from langchain.schema import Document\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import Pinecone\nembeddings = OpenAIEmbeddings()\n# create new index\npinecone.create_index(\"langchain-self-retriever-demo\", dimension=1536)\ndocs = [\n Document(page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\", metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": [\"action\", \"science fiction\"]}),\n Document(page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\", metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2}),\n Document(page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\", metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6}),\n Document(page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\", metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3}),\n 
Document(page_content=\"Toys come alive and have a blast doing so\", metadata={\"year\": 1995, \"genre\": \"animated\"}),\n Document(page_content=\"Three men walk into the Zone, three men walk out of the Zone\", metadata={\"year\": 1979, \"rating\": 9.9, \"director\": \"Andrei Tarkovsky\", \"genre\": [\"science fiction\", \"thriller\"], \"rating\": 9.9})\n]\nvectorstore = Pinecone.from_documents(\n docs, embeddings, index_name=\"langchain-self-retriever-demo\"\n)\nCreating our self-querying retriever#"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1586,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-2"},"text":{"kind":"string","value":")\nCreating our self-querying retriever#\nNow we can instantiate our retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.\nfrom langchain.llms import OpenAI\nfrom langchain.retrievers.self_query.base import SelfQueryRetriever\nfrom langchain.chains.query_constructor.base import AttributeInfo\nmetadata_field_info=[\n AttributeInfo(\n name=\"genre\",\n description=\"The genre of the movie\", \n type=\"string or list[string]\", \n ),\n AttributeInfo(\n name=\"year\",\n description=\"The year the movie was released\", \n type=\"integer\", \n ),\n AttributeInfo(\n name=\"director\",\n description=\"The name of the movie director\", \n type=\"string\", \n ),\n AttributeInfo(\n name=\"rating\",\n description=\"A 1-10 rating for the movie\",\n type=\"float\"\n ),\n]\ndocument_content_description = \"Brief summary of a movie\"\nllm = OpenAI(temperature=0)\nretriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True)\nTesting it out#\nAnd now we can try actually using our retriever!\n# This example only specifies a relevant 
query\nretriever.get_relevant_documents(\"What are some movies about dinosaurs\")\nquery='dinosaur' filter=None\n[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': ['action', 'science fiction'], 'rating': 7.7, 'year': 1993.0}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1587,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-3"},"text":{"kind":"string","value":"Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0}),\n Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}),\n Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'director': 'Christopher Nolan', 'rating': 8.2, 'year': 2010.0})]\n# This example only specifies a filter\nretriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")\nquery=' ' filter=Comparison(comparator=, attribute='rating', value=8.5)\n[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}),\n Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})]\n# This example specifies a query and a filter\nretriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")\nquery='women' filter=Comparison(comparator=, attribute='director', value='Greta 
Gerwig')"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1588,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-4"},"text":{"kind":"string","value":"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'director': 'Greta Gerwig', 'rating': 8.3, 'year': 2019.0})]\n# This example specifies a composite filter\nretriever.get_relevant_documents(\"What's a highly rated (above 8.5) science fiction film?\")\nquery=' ' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='genre', value='science fiction'), Comparison(comparator=, attribute='rating', value=8.5)])\n[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})]\n# This example specifies a query and composite filter\nretriever.get_relevant_documents(\"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\")\nquery='toys' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='year', value=1990.0), Comparison(comparator=, attribute='year', value=2005.0), Comparison(comparator=, attribute='genre', value='animated')])\n[Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0})]\nFilter k#\nWe can also use the self query retriever to specify k: the number of documents to fetch."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1589,"cells":{"id":{"kind":"string","value":"a9607cc3ce89-5"},"text":{"kind":"string","value":"We can do this by passing enable_limit=True to the constructor.\nretriever = SelfQueryRetriever.from_llm(\n llm, \n vectorstore, \n 
document_content_description, \n metadata_field_info, \n enable_limit=True,\n verbose=True\n)\n# This example only specifies a relevant query\nretriever.get_relevant_documents(\"What are two movies about dinosaurs\")\nprevious\nSelf-querying with Qdrant\nnext\nSVM\n Contents\n \nCreating a Pinecone index\nCreating our self-querying retriever\nTesting it out\nFilter k\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html"}}},{"rowIdx":1590,"cells":{"id":{"kind":"string","value":"21705101c034-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nSelf-querying with Chroma\n Contents \nCreating a Chroma vectorstore\nCreating our self-querying retriever\nTesting it out\nFilter k\nSelf-querying with Chroma#\nChroma is a database for building AI applications with embeddings.\nIn the notebook we’ll demo the SelfQueryRetriever wrapped around a Chroma vector store.\nCreating a Chroma vectorstore#\nFirst we’ll want to create a Chroma VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies.\nNOTE: The self-query retriever requires you to have lark installed (pip install lark). 
We also need the chromadb package.\n#!pip install lark\n#!pip install chromadb\nWe want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\nimport os\nimport getpass\nos.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\nfrom langchain.schema import Document\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import Chroma\nembeddings = OpenAIEmbeddings()\ndocs = [\n Document(page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\", metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"}),\n Document(page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\", metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2}),\n Document(page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\", metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1591,"cells":{"id":{"kind":"string","value":"21705101c034-1"},"text":{"kind":"string","value":"Document(page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\", metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3}),\n Document(page_content=\"Toys come alive and have a blast doing so\", metadata={\"year\": 1995, \"genre\": \"animated\"}),\n Document(page_content=\"Three men walk into the Zone, three men walk out of the Zone\", metadata={\"year\": 1979, \"rating\": 9.9, \"director\": \"Andrei Tarkovsky\", \"genre\": \"science fiction\", \"rating\": 9.9})\n]\nvectorstore = Chroma.from_documents(\n docs, embeddings\n)\nUsing embedded DuckDB without persistence: data will be transient\nCreating our self-querying retriever#\nNow we can instantiate our 
retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.\nfrom langchain.llms import OpenAI\nfrom langchain.retrievers.self_query.base import SelfQueryRetriever\nfrom langchain.chains.query_constructor.base import AttributeInfo\nmetadata_field_info=[\n AttributeInfo(\n name=\"genre\",\n description=\"The genre of the movie\", \n type=\"string or list[string]\", \n ),\n AttributeInfo(\n name=\"year\",\n description=\"The year the movie was released\", \n type=\"integer\", \n ),\n AttributeInfo(\n name=\"director\",\n description=\"The name of the movie director\", \n type=\"string\", \n ),\n AttributeInfo(\n name=\"rating\",\n description=\"A 1-10 rating for the movie\",\n type=\"float\"\n ),\n]"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1592,"cells":{"id":{"kind":"string","value":"21705101c034-2"},"text":{"kind":"string","value":"type=\"float\"\n ),\n]\ndocument_content_description = \"Brief summary of a movie\"\nllm = OpenAI(temperature=0)\nretriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True)\nTesting it out#\nAnd now we can try actually using our retriever!\n# This example only specifies a relevant query\nretriever.get_relevant_documents(\"What are some movies about dinosaurs\")\nquery='dinosaur' filter=None\n[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}),\n Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi 
Kon', 'rating': 8.6}),\n Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'year': 2010, 'director': 'Christopher Nolan', 'rating': 8.2})]\n# This example only specifies a filter\nretriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")\nquery=' ' filter=Comparison(comparator=, attribute='rating', value=8.5)\n[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1593,"cells":{"id":{"kind":"string","value":"21705101c034-3"},"text":{"kind":"string","value":"Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})]\n# This example specifies a query and a filter\nretriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")\nquery='women' filter=Comparison(comparator=, attribute='director', value='Greta Gerwig')\n[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3})]\n# This example specifies a composite filter\nretriever.get_relevant_documents(\"What's a highly rated (above 8.5) science fiction film?\")\nquery=' ' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='genre', value='science fiction'), Comparison(comparator=, attribute='rating', value=8.5)])\n[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})]\n# This example specifies a query and 
composite filter\nretriever.get_relevant_documents(\"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\")"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1594,"cells":{"id":{"kind":"string","value":"21705101c034-4"},"text":{"kind":"string","value":"query='toys' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='year', value=1990), Comparison(comparator=, attribute='year', value=2005), Comparison(comparator=, attribute='genre', value='animated')])\n[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]\nFilter k#\nWe can also use the self query retriever to specify k: the number of documents to fetch.\nWe can do this by passing enable_limit=True to the constructor.\nretriever = SelfQueryRetriever.from_llm(\n llm, \n vectorstore, \n document_content_description, \n metadata_field_info, \n enable_limit=True,\n verbose=True\n)\n# This example only specifies a relevant query\nretriever.get_relevant_documents(\"what are two movies about dinosaurs\")\nquery='dinosaur' filter=None\n[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}),\n Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}),"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1595,"cells":{"id":{"kind":"string","value":"21705101c034-5"},"text":{"kind":"string","value":"Document(page_content='Leo 
DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'year': 2010, 'director': 'Christopher Nolan', 'rating': 8.2})]\nprevious\nChatGPT Plugin\nnext\nCohere Reranker\n Contents\n \nCreating a Chroma vectorstore\nCreating our self-querying retriever\nTesting it out\nFilter k\nBy Harrison Chase\n \n © Copyright 2023, Harrison Chase.\n \n Last updated on Jun 16, 2023."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html"}}},{"rowIdx":1596,"cells":{"id":{"kind":"string","value":"127fa9812842-0"},"text":{"kind":"string","value":".ipynb\n.pdf\nArxiv\n Contents \nInstallation\nExamples\nRunning retriever\nQuestion Answering on facts\nArxiv#\narXiv is an open-access archive for 2 million scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics.\nThis notebook shows how to retrieve scientific articles from Arxiv.org into the Document format that is used downstream.\nInstallation#\nFirst, you need to install arxiv python package.\n#!pip install arxiv\nArxivRetriever has these arguments:\noptional load_max_docs: default=100. Use it to limit number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments. There is a hard limit of 300 for now.\noptional load_all_available_meta: default=False. By default only the most important fields downloaded: Published (date when document was published/last updated), Title, Authors, Summary. 
If True, other fields also downloaded.\nget_relevant_documents() has one argument, query: free text which used to find documents in Arxiv.org\nExamples#\nRunning retriever#\nfrom langchain.retrievers import ArxivRetriever\nretriever = ArxivRetriever(load_max_docs=2)\ndocs = retriever.get_relevant_documents(query='1605.08386')\ndocs[0].metadata # meta-information of the Document\n{'Published': '2016-05-26',\n 'Title': 'Heat-bath random walks with Markov bases',\n 'Authors': 'Caprice Stanley, Tobias Windisch',"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html"}}},{"rowIdx":1597,"cells":{"id":{"kind":"string","value":"127fa9812842-1"},"text":{"kind":"string","value":"'Authors': 'Caprice Stanley, Tobias Windisch',\n 'Summary': 'Graphs on lattice points are studied whose edges come from a finite set of\\nallowed moves of arbitrary length. We show that the diameter of these graphs on\\nfibers of a fixed integer matrix can be bounded from above by a constant. We\\nthen study the mixing behaviour of heat-bath random walks on these graphs. We\\nalso state explicit conditions on the set of moves so that the heat-bath random\\nwalk, a generalization of the Glauber dynamics, is an expander in fixed\\ndimension.'}\ndocs[0].page_content[:400] # a content of the Document \n'arXiv:1605.08386v1 [math.CO] 26 May 2016\\nHEAT-BATH RANDOM WALKS WITH MARKOV BASES\\nCAPRICE STANLEY AND TOBIAS WINDISCH\\nAbstract. Graphs on lattice points are studied whose edges come from a finite set of\\nallowed moves of arbitrary length. We show that the diameter of these graphs on fibers of a\\nfixed integer matrix can be bounded from above by a constant. 
We then study the mixing\\nbehaviour of heat-b'\nQuestion Answering on facts#\n# get a token: https://platform.openai.com/account/api-keys\nfrom getpass import getpass\nOPENAI_API_KEY = getpass()\nimport os\nos.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.chains import ConversationalRetrievalChain\nmodel = ChatOpenAI(model_name='gpt-3.5-turbo') # switch to 'gpt-4'\nqa = ConversationalRetrievalChain.from_llm(model,retriever=retriever)\nquestions = ["},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html"}}},{"rowIdx":1598,"cells":{"id":{"kind":"string","value":"127fa9812842-2"},"text":{"kind":"string","value":"questions = [\n \"What are Heat-bath random walks with Markov base?\",\n \"What is the ImageBind model?\",\n \"How does Compositional Reasoning with Large Language Models works?\", \n] \nchat_history = []\nfor question in questions: \n result = qa({\"question\": question, \"chat_history\": chat_history})\n chat_history.append((question, result['answer']))\n print(f\"-> **Question**: {question} \\n\")\n print(f\"**Answer**: {result['answer']} \\n\")\n-> **Question**: What are Heat-bath random walks with Markov base? \n**Answer**: I'm not sure, as I don't have enough context to provide a definitive answer. The term \"Heat-bath random walks with Markov base\" is not mentioned in the given text. Could you provide more information or context about where you encountered this term? \n-> **Question**: What is the ImageBind model? \n**Answer**: ImageBind is an approach developed by Facebook AI Research to learn a joint embedding across six different modalities, including images, text, audio, depth, thermal, and IMU data. The approach uses the binding property of images to align each modality's embedding to image embeddings and achieve an emergent alignment across all modalities. 
This enables novel multimodal capabilities, including cross-modal retrieval, embedding-space arithmetic, and audio-to-image generation, among others. The approach sets a new state-of-the-art on emergent zero-shot recognition tasks across modalities, outperforming specialist supervised models. Additionally, it shows strong few-shot recognition results and serves as a new way to evaluate vision models for visual and non-visual tasks. \n-> **Question**: How does Compositional Reasoning with Large Language Models works?"},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html"}}},{"rowIdx":1599,"cells":{"id":{"kind":"string","value":"127fa9812842-3"},"text":{"kind":"string","value":"-> **Question**: How does Compositional Reasoning with Large Language Models works? \n**Answer**: Compositional reasoning with large language models refers to the ability of these models to correctly identify and represent complex concepts by breaking them down into smaller, more basic parts and combining them in a structured way. This involves understanding the syntax and semantics of language and using that understanding to build up more complex meanings from simpler ones. \nIn the context of the paper \"Does CLIP Bind Concepts? Probing Compositionality in Large Image Models\", the authors focus specifically on the ability of a large pretrained vision and language model (CLIP) to encode compositional concepts and to bind variables in a structure-sensitive way. They examine CLIP's ability to compose concepts in a single-object setting, as well as in situations where concept binding is needed. \nThe authors situate their work within the tradition of research on compositional distributional semantics models (CDSMs), which seek to bridge the gap between distributional models and formal semantics by building architectures which operate over vectors yet still obey traditional theories of linguistic composition. 
They compare the performance of CLIP with several architectures from research on CDSMs to evaluate its ability to encode and reason about compositional concepts. \nquestions = [\n \"What are Heat-bath random walks with Markov base? Include references to answer.\",\n] \nchat_history = []\nfor question in questions: \n result = qa({\"question\": question, \"chat_history\": chat_history})\n chat_history.append((question, result['answer']))\n print(f\"-> **Question**: {question} \\n\")\n print(f\"**Answer**: {result['answer']} \\n\")\n-> **Question**: What are Heat-bath random walks with Markov base? Include references to answer."},"source":{"kind":"string","value":"rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":15,"numItemsPerPage":100,"numTotalItems":4651,"offset":1500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzI0NzA5Mywic3ViIjoiL2RhdGFzZXRzL0VjbGlwc2VQaGFnZS9sYW5nY2hhaW4tZG9jcy1jc3YiLCJleHAiOjE3NTcyNTA2OTMsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.-vsi8XAgrGDL7uXZi-adH73ifC51hI-d7eOKiWFd4xZyzR3glyyYzRaWawL27TkG9dYvygwVF14yA8ag3jx2BA","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
id
stringlengths
14
16
text
stringlengths
36
2.73k
source
stringlengths
59
127
2b0042d6f350-13
[Document(page_content='Team: Nationals\n"Payroll (millions)": 81.34\n"Wins": 98', lookup_str='', metadata={'source': 'Nationals', 'row': 0}, lookup_index=0), Document(page_content='Team: Reds\n"Payroll (millions)": 82.20\n"Wins": 97', lookup_str='', metadata={'source': 'Reds', 'row': 1}, lookup_index=0), Document(page_content='Team: Yankees\n"Payroll (millions)": 197.96\n"Wins": 95', lookup_str='', metadata={'source': 'Yankees', 'row': 2}, lookup_index=0), Document(page_content='Team: Giants\n"Payroll (millions)": 117.62\n"Wins": 94', lookup_str='', metadata={'source': 'Giants', 'row': 3}, lookup_index=0), Document(page_content='Team: Braves\n"Payroll (millions)": 83.31\n"Wins": 94', lookup_str='', metadata={'source': 'Braves', 'row': 4}, lookup_index=0), Document(page_content='Team: Athletics\n"Payroll (millions)": 55.37\n"Wins": 94', lookup_str='', metadata={'source': 'Athletics', 'row': 5}, lookup_index=0), Document(page_content='Team: Rangers\n"Payroll (millions)": 120.51\n"Wins": 93', lookup_str='', metadata={'source': 'Rangers', 'row': 6}, lookup_index=0), Document(page_content='Team: Orioles\n"Payroll (millions)": 81.43\n"Wins": 93', lookup_str='', metadata={'source': 'Orioles', 'row': 7}, lookup_index=0), Document(page_content='Team: Rays\n"Payroll
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-14
7}, lookup_index=0), Document(page_content='Team: Rays\n"Payroll (millions)": 64.17\n"Wins": 90', lookup_str='', metadata={'source': 'Rays', 'row': 8}, lookup_index=0), Document(page_content='Team: Angels\n"Payroll (millions)": 154.49\n"Wins": 89', lookup_str='', metadata={'source': 'Angels', 'row': 9}, lookup_index=0), Document(page_content='Team: Tigers\n"Payroll (millions)": 132.30\n"Wins": 88', lookup_str='', metadata={'source': 'Tigers', 'row': 10}, lookup_index=0), Document(page_content='Team: Cardinals\n"Payroll (millions)": 110.30\n"Wins": 88', lookup_str='', metadata={'source': 'Cardinals', 'row': 11}, lookup_index=0), Document(page_content='Team: Dodgers\n"Payroll (millions)": 95.14\n"Wins": 86', lookup_str='', metadata={'source': 'Dodgers', 'row': 12}, lookup_index=0), Document(page_content='Team: White Sox\n"Payroll (millions)": 96.92\n"Wins": 85', lookup_str='', metadata={'source': 'White Sox', 'row': 13}, lookup_index=0), Document(page_content='Team: Brewers\n"Payroll (millions)": 97.65\n"Wins": 83', lookup_str='', metadata={'source': 'Brewers', 'row': 14}, lookup_index=0), Document(page_content='Team: Phillies\n"Payroll (millions)": 174.54\n"Wins": 81', lookup_str='', metadata={'source': 'Phillies', 'row': 15}, lookup_index=0), Document(page_content='Team:
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-15
'row': 15}, lookup_index=0), Document(page_content='Team: Diamondbacks\n"Payroll (millions)": 74.28\n"Wins": 81', lookup_str='', metadata={'source': 'Diamondbacks', 'row': 16}, lookup_index=0), Document(page_content='Team: Pirates\n"Payroll (millions)": 63.43\n"Wins": 79', lookup_str='', metadata={'source': 'Pirates', 'row': 17}, lookup_index=0), Document(page_content='Team: Padres\n"Payroll (millions)": 55.24\n"Wins": 76', lookup_str='', metadata={'source': 'Padres', 'row': 18}, lookup_index=0), Document(page_content='Team: Mariners\n"Payroll (millions)": 81.97\n"Wins": 75', lookup_str='', metadata={'source': 'Mariners', 'row': 19}, lookup_index=0), Document(page_content='Team: Mets\n"Payroll (millions)": 93.35\n"Wins": 74', lookup_str='', metadata={'source': 'Mets', 'row': 20}, lookup_index=0), Document(page_content='Team: Blue Jays\n"Payroll (millions)": 75.48\n"Wins": 73', lookup_str='', metadata={'source': 'Blue Jays', 'row': 21}, lookup_index=0), Document(page_content='Team: Royals\n"Payroll (millions)": 60.91\n"Wins": 72', lookup_str='', metadata={'source': 'Royals', 'row': 22}, lookup_index=0), Document(page_content='Team: Marlins\n"Payroll (millions)": 118.07\n"Wins": 69', lookup_str='', metadata={'source': 'Marlins', 'row': 23}, lookup_index=0),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-16
metadata={'source': 'Marlins', 'row': 23}, lookup_index=0), Document(page_content='Team: Red Sox\n"Payroll (millions)": 173.18\n"Wins": 69', lookup_str='', metadata={'source': 'Red Sox', 'row': 24}, lookup_index=0), Document(page_content='Team: Indians\n"Payroll (millions)": 78.43\n"Wins": 68', lookup_str='', metadata={'source': 'Indians', 'row': 25}, lookup_index=0), Document(page_content='Team: Twins\n"Payroll (millions)": 94.08\n"Wins": 66', lookup_str='', metadata={'source': 'Twins', 'row': 26}, lookup_index=0), Document(page_content='Team: Rockies\n"Payroll (millions)": 78.06\n"Wins": 64', lookup_str='', metadata={'source': 'Rockies', 'row': 27}, lookup_index=0), Document(page_content='Team: Cubs\n"Payroll (millions)": 88.19\n"Wins": 61', lookup_str='', metadata={'source': 'Cubs', 'row': 28}, lookup_index=0), Document(page_content='Team: Astros\n"Payroll (millions)": 60.65\n"Wins": 55', lookup_str='', metadata={'source': 'Astros', 'row': 29}, lookup_index=0)]
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-17
UnstructuredCSVLoader# You can also load the table using the UnstructuredCSVLoader. One advantage of using UnstructuredCSVLoader is that if you use it in "elements" mode, an HTML representation of the table will be available in the metadata. from langchain.document_loaders.csv_loader import UnstructuredCSVLoader loader = UnstructuredCSVLoader(file_path='example_data/mlb_teams_2012.csv', mode="elements") docs = loader.load() print(docs[0].metadata["text_as_html"]) <table border="1" class="dataframe"> <tbody> <tr> <td>Nationals</td> <td>81.34</td> <td>98</td> </tr> <tr> <td>Reds</td> <td>82.20</td> <td>97</td> </tr> <tr> <td>Yankees</td> <td>197.96</td> <td>95</td> </tr> <tr> <td>Giants</td> <td>117.62</td> <td>94</td> </tr> <tr> <td>Braves</td> <td>83.31</td> <td>94</td> </tr> <tr> <td>Athletics</td> <td>55.37</td> <td>94</td> </tr> <tr> <td>Rangers</td> <td>120.51</td> <td>93</td>
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-18
<td>120.51</td> <td>93</td> </tr> <tr> <td>Orioles</td> <td>81.43</td> <td>93</td> </tr> <tr> <td>Rays</td> <td>64.17</td> <td>90</td> </tr> <tr> <td>Angels</td> <td>154.49</td> <td>89</td> </tr> <tr> <td>Tigers</td> <td>132.30</td> <td>88</td> </tr> <tr> <td>Cardinals</td> <td>110.30</td> <td>88</td> </tr> <tr> <td>Dodgers</td> <td>95.14</td> <td>86</td> </tr> <tr> <td>White Sox</td> <td>96.92</td> <td>85</td> </tr> <tr> <td>Brewers</td> <td>97.65</td> <td>83</td> </tr> <tr> <td>Phillies</td> <td>174.54</td> <td>81</td> </tr> <tr> <td>Diamondbacks</td>
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-19
</tr> <tr> <td>Diamondbacks</td> <td>74.28</td> <td>81</td> </tr> <tr> <td>Pirates</td> <td>63.43</td> <td>79</td> </tr> <tr> <td>Padres</td> <td>55.24</td> <td>76</td> </tr> <tr> <td>Mariners</td> <td>81.97</td> <td>75</td> </tr> <tr> <td>Mets</td> <td>93.35</td> <td>74</td> </tr> <tr> <td>Blue Jays</td> <td>75.48</td> <td>73</td> </tr> <tr> <td>Royals</td> <td>60.91</td> <td>72</td> </tr> <tr> <td>Marlins</td> <td>118.07</td> <td>69</td> </tr> <tr> <td>Red Sox</td> <td>173.18</td> <td>69</td> </tr> <tr> <td>Indians</td> <td>78.43</td> <td>68</td>
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
2b0042d6f350-20
<td>78.43</td> <td>68</td> </tr> <tr> <td>Twins</td> <td>94.08</td> <td>66</td> </tr> <tr> <td>Rockies</td> <td>78.06</td> <td>64</td> </tr> <tr> <td>Cubs</td> <td>88.19</td> <td>61</td> </tr> <tr> <td>Astros</td> <td>60.65</td> <td>55</td> </tr> </tbody> </table> previous Copy Paste next Email Contents Customizing the csv parsing and loading Specify a column to identify the document source UnstructuredCSVLoader By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/csv.html
0adfb2e97933-0
.ipynb .pdf Gutenberg Gutenberg# Project Gutenberg is an online library of free eBooks. This notebook covers how to load links to Gutenberg e-books into a document format that we can use downstream. from langchain.document_loaders import GutenbergLoader loader = GutenbergLoader('https://www.gutenberg.org/cache/epub/69972/pg69972.txt') data = loader.load() data[0].page_content[:300] 'The Project Gutenberg eBook of The changed brides, by Emma Dorothy\r\n\n\nEliza Nevitte Southworth\r\n\n\n\r\n\n\nThis eBook is for the use of anyone anywhere in the United States and\r\n\n\nmost other parts of the world at no cost and with almost no restrictions\r\n\n\nwhatsoever. You may copy it, give it away or re-u' data[0].metadata {'source': 'https://www.gutenberg.org/cache/epub/69972/pg69972.txt'} previous College Confidential next Hacker News By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/document_loaders/examples/gutenberg.html
c3caf977f19c-0
.ipynb .pdf Getting Started Getting Started# The default recommended text splitter is the RecursiveCharacterTextSplitter. This text splitter takes a list of characters. It tries to create chunks based on splitting on the first character, but if any chunks are too large it then moves onto the next character, and so forth. By default the characters it tries to split on are ["\n\n", "\n", " ", ""] In addition to controlling which characters you can split on, you can also control a few other things: length_function: how the length of chunks is calculated. Defaults to just counting number of characters, but it’s pretty common to pass a token counter here. chunk_size: the maximum size of your chunks (as measured by the length function). chunk_overlap: the maximum overlap between chunks. It can be nice to have some overlap to maintain some continuity between chunks (eg do a sliding window). add_start_index : wether to include the starting position of each chunk within the original document in the metadata. # This is a long document we can split up. with open('../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( # Set a really small chunk size, just to show. chunk_size = 100, chunk_overlap = 20, length_function = len, add_start_index = True, ) texts = text_splitter.create_documents([state_of_the_union]) print(texts[0]) print(texts[1]) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and' metadata={'start_index': 0}
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/getting_started.html
c3caf977f19c-1
page_content='of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.' metadata={'start_index': 82} previous Text Splitters next Character By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/getting_started.html
44526870d883-0
.ipynb .pdf CodeTextSplitter Contents Python JS Solidity Markdown Latex HTML CodeTextSplitter# CodeTextSplitter allows you to split your code with multiple language support. Import enum Language and specify the language. from langchain.text_splitter import ( RecursiveCharacterTextSplitter, Language, ) # Full list of support languages [e.value for e in Language] ['cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html', 'sol'] # You can also see the separators used for a given language RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON) ['\nclass ', '\ndef ', '\n\tdef ', '\n\n', '\n', ' ', ''] Python# Here’s an example using the PythonTextSplitter PYTHON_CODE = """ def hello_world(): print("Hello, World!") # Call the function hello_world() """ python_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.PYTHON, chunk_size=50, chunk_overlap=0 ) python_docs = python_splitter.create_documents([PYTHON_CODE]) python_docs [Document(page_content='def hello_world():\n print("Hello, World!")', metadata={}), Document(page_content='# Call the function\nhello_world()', metadata={})] JS# Here’s an example using the JS text splitter JS_CODE = """ function helloWorld() { console.log("Hello, World!"); } // Call the function helloWorld(); """
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
44526870d883-1
} // Call the function helloWorld(); """ js_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.JS, chunk_size=60, chunk_overlap=0 ) js_docs = js_splitter.create_documents([JS_CODE]) js_docs [Document(page_content='function helloWorld() {\n console.log("Hello, World!");\n}', metadata={}), Document(page_content='// Call the function\nhelloWorld();', metadata={})] Solidity# Here’s an example using the Solidity text splitter SOL_CODE = """ pragma solidity ^0.8.20; contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; } } """ sol_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.SOL, chunk_size=128, chunk_overlap=0 ) sol_docs = sol_splitter.create_documents([SOL_CODE]) sol_docs [Document(page_content='pragma solidity ^0.8.20;', metadata={}), Document(page_content='contract HelloWorld {\n function add(uint a, uint b) pure public returns(uint) {\n return a + b;\n }\n}', metadata={})] Markdown# Here’s an example using the Markdown text splitter. markdown_text = """ # 🦜️🔗 LangChain ⚡ Building applications with LLMs through composability ⚡ ## Quick Install ```bash # Hopefully this code block isn't split pip install langchain ``` As an open source project in a rapidly developing field, we are extremely open to contributions. """ md_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0 )
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
44526870d883-2
language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0 ) md_docs = md_splitter.create_documents([markdown_text]) md_docs [Document(page_content='# 🦜️🔗 LangChain', metadata={}), Document(page_content='⚡ Building applications with LLMs through composability ⚡', metadata={}), Document(page_content='## Quick Install', metadata={}), Document(page_content="```bash\n# Hopefully this code block isn't split", metadata={}), Document(page_content='pip install langchain', metadata={}), Document(page_content='```', metadata={}), Document(page_content='As an open source project in a rapidly developing field, we', metadata={}), Document(page_content='are extremely open to contributions.', metadata={})] Latex# Here’s an example on Latex text latex_text = """ \documentclass{article} \begin{document} \maketitle \section{Introduction} Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis. \subsection{History of LLMs} The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance. \subsection{Applications of LLMs}
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
44526870d883-3
\subsection{Applications of LLMs} LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics. \end{document} """ latex_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0 ) latex_docs = latex_splitter.create_documents([latex_text]) latex_docs [Document(page_content='\\documentclass{article}\n\n\x08egin{document}\n\n\\maketitle', metadata={}), Document(page_content='\\section{Introduction}', metadata={}), Document(page_content='Large language models (LLMs) are a type of machine learning', metadata={}), Document(page_content='model that can be trained on vast amounts of text data to', metadata={}), Document(page_content='generate human-like language. In recent years, LLMs have', metadata={}), Document(page_content='made significant advances in a variety of natural language', metadata={}), Document(page_content='processing tasks, including language translation, text', metadata={}), Document(page_content='generation, and sentiment analysis.', metadata={}), Document(page_content='\\subsection{History of LLMs}', metadata={}), Document(page_content='The earliest LLMs were developed in the 1980s and 1990s,', metadata={}), Document(page_content='but they were limited by the amount of data that could be', metadata={}), Document(page_content='processed and the computational power available at the', metadata={}), Document(page_content='time. In the past decade, however, advances in hardware and', metadata={}), Document(page_content='software have made it possible to train LLMs on massive', metadata={}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
44526870d883-4
Document(page_content='datasets, leading to significant improvements in', metadata={}), Document(page_content='performance.', metadata={}), Document(page_content='\\subsection{Applications of LLMs}', metadata={}), Document(page_content='LLMs have many applications in industry, including', metadata={}), Document(page_content='chatbots, content creation, and virtual assistants. They', metadata={}), Document(page_content='can also be used in academia for research in linguistics,', metadata={}), Document(page_content='psychology, and computational linguistics.', metadata={}), Document(page_content='\\end{document}', metadata={})] HTML# Here’s an example using an HTML text splitter html_text = """ <!DOCTYPE html> <html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions. </div> </body> </html> """ html_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0 ) html_docs = html_splitter.create_documents([html_text]) html_docs [Document(page_content='<!DOCTYPE html>\n<html>\n <head>', metadata={}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
44526870d883-5
Document(page_content='<title>🦜️🔗 LangChain</title>\n <style>', metadata={}), Document(page_content='body {', metadata={}), Document(page_content='font-family: Arial, sans-serif;', metadata={}), Document(page_content='}\n h1 {', metadata={}), Document(page_content='color: darkblue;\n }', metadata={}), Document(page_content='</style>\n </head>\n <body>\n <div>', metadata={}), Document(page_content='<h1>🦜️🔗 LangChain</h1>', metadata={}), Document(page_content='<p>⚡ Building applications with LLMs through', metadata={}), Document(page_content='composability ⚡</p>', metadata={}), Document(page_content='</div>\n <div>', metadata={}), Document(page_content='As an open source project in a rapidly', metadata={}), Document(page_content='developing field, we are extremely open to contributions.', metadata={}), Document(page_content='</div>\n </body>\n</html>', metadata={})] previous Character next NLTK Contents Python JS Solidity Markdown Latex HTML By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/code_splitter.html
de368ca32158-0
.ipynb .pdf spaCy spaCy# spaCy is an open-source software library for advanced natural language processing, written in the programming languages Python and Cython. Another alternative to NLTK is to use Spacy tokenizer. How the text is split: by spaCy tokenizer How the chunk size is measured: by number of characters #!pip install spacy # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import SpacyTextSplitter text_splitter = SpacyTextSplitter(chunk_size=1000) texts = text_splitter.split_text(state_of_the_union) print(texts[0]) Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. Last year COVID-19 kept us apart. This year we are finally together again. Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. With a duty to one another to the American people to the Constitution. And with an unwavering resolve that freedom will always triumph over tyranny. Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. He met the Ukrainian people. From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. previous Recursive Character next Tiktoken By Harrison Chase
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/spacy.html
de368ca32158-1
previous Recursive Character next Tiktoken By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/spacy.html
663fb253628f-0
.ipynb .pdf Tiktoken Tiktoken# tiktoken is a fast BPE tokeniser created by OpenAI. How the text is split: by tiktoken tokens How the chunk size is measured: by tiktoken tokens #!pip install tiktoken # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import TokenTextSplitter text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0) texts = text_splitter.split_text(state_of_the_union) print(texts[0]) Madam Speaker, Madam Vice President, our previous spaCy next Hugging Face tokenizer By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/tiktoken_splitter.html
785ac476db93-0
.ipynb .pdf Hugging Face tokenizer Hugging Face tokenizer# Hugging Face has many tokenizers. We use Hugging Face tokenizer, the GPT2TokenizerFast to count the text length in tokens. How the text is split: by character passed in How the chunk size is measured: by number of tokens calculated by the Hugging Face tokenizer from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import CharacterTextSplitter text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(tokenizer, chunk_size=100, chunk_overlap=0) texts = text_splitter.split_text(state_of_the_union) print(texts[0]) Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. Last year COVID-19 kept us apart. This year we are finally together again. Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. With a duty to one another to the American people to the Constitution. previous Tiktoken next tiktoken (OpenAI) tokenizer By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/huggingface_length_function.html
17aca0b5d2a3-0
.ipynb .pdf Recursive Character Recursive Character# This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is ["\n\n", "\n", " ", ""]. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text. How the text is split: by list of characters How the chunk size is measured: by number of characters # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( # Set a really small chunk size, just to show. chunk_size = 100, chunk_overlap = 20, length_function = len, ) texts = text_splitter.create_documents([state_of_the_union]) print(texts[0]) print(texts[1]) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and' lookup_str='' metadata={} lookup_index=0 page_content='of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.' lookup_str='' metadata={} lookup_index=0 text_splitter.split_text(state_of_the_union)[:2] ['Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and', 'of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.'] previous NLTK next spaCy By Harrison Chase
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/recursive_text_splitter.html
17aca0b5d2a3-1
previous NLTK next spaCy By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/recursive_text_splitter.html
a040b66c6ffb-0
.ipynb .pdf tiktoken (OpenAI) tokenizer tiktoken (OpenAI) tokenizer# tiktoken is a fast BPE tokenizer created by OpenAI. We can use it to estimate tokens used. It will probably be more accurate for the OpenAI models. How the text is split: by character passed in How the chunk size is measured: by tiktoken tokenizer #!pip install tiktoken # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import CharacterTextSplitter text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=100, chunk_overlap=0) texts = text_splitter.split_text(state_of_the_union) print(texts[0]) Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. Last year COVID-19 kept us apart. This year we are finally together again. Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. With a duty to one another to the American people to the Constitution. previous Hugging Face tokenizer next Vectorstores By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/tiktoken.html
ee7c634c6fbd-0
.ipynb .pdf NLTK NLTK# The Natural Language Toolkit, or more commonly NLTK, is a suite of libraries and programs for symbolic and statistical natural language processing (NLP) for English written in the Python programming language. Rather than just splitting on “\n\n”, we can use NLTK to split based on NLTK tokenizers. How the text is split: by NLTK tokenizer. How the chunk size is measured: by number of characters #pip install nltk # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import NLTKTextSplitter text_splitter = NLTKTextSplitter(chunk_size=1000) texts = text_splitter.split_text(state_of_the_union) print(texts[0]) Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. Last year COVID-19 kept us apart. This year we are finally together again. Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. With a duty to one another to the American people to the Constitution. And with an unwavering resolve that freedom will always triumph over tyranny. Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. He met the Ukrainian people. From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/nltk.html
ee7c634c6fbd-1
Groups of citizens blocking tanks with their bodies. previous CodeTextSplitter next Recursive Character By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/nltk.html
d7922252429a-0
.ipynb .pdf Character Character# This is the simplest method. This splits based on characters (by default “\n\n”) and measures chunk length by number of characters. How the text is split: by single character How the chunk size is measured: by number of characters # This is a long document we can split up. with open('../../../state_of_the_union.txt') as f: state_of_the_union = f.read() from langchain.text_splitter import CharacterTextSplitter text_splitter = CharacterTextSplitter( separator = "\n\n", chunk_size = 1000, chunk_overlap = 200, length_function = len, ) texts = text_splitter.create_documents([state_of_the_union]) print(texts[0])
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html
d7922252429a-1
print(texts[0]) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' lookup_str='' metadata={} lookup_index=0 Here’s an example of passing metadata along with the documents, notice that it is split along with the documents. metadatas = [{"document": 1}, {"document": 2}] documents = text_splitter.create_documents([state_of_the_union, state_of_the_union], metadatas=metadatas) print(documents[0])
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html
d7922252429a-2
print(documents[0]) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' lookup_str='' metadata={'document': 1} lookup_index=0 text_splitter.split_text(state_of_the_union)[0]
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html
d7922252429a-3
text_splitter.split_text(state_of_the_union)[0] 'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' previous Getting Started next CodeTextSplitter By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/text_splitters/examples/character_text_splitter.html
a16d2d34b47f-0
.ipynb .pdf ElasticSearch BM25 Contents Create New Retriever Add texts (if necessary) Use Retriever ElasticSearch BM25# Elasticsearch is a distributed, RESTful search and analytics engine. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents. In information retrieval, Okapi BM25 (BM is an abbreviation of best matching) is a ranking function used by search engines to estimate the relevance of documents to a given search query. It is based on the probabilistic retrieval framework developed in the 1970s and 1980s by Stephen E. Robertson, Karen Spärck Jones, and others. The name of the actual ranking function is BM25. The fuller name, Okapi BM25, includes the name of the first system to use it, which was the Okapi information retrieval system, implemented at London’s City University in the 1980s and 1990s. BM25 and its newer variants, e.g. BM25F (a version of BM25 that can take document structure and anchor text into account), represent TF-IDF-like retrieval functions used in document retrieval. This notebook shows how to use a retriever that uses ElasticSearch and BM25. For more information on the details of BM25 see this blog post. #!pip install elasticsearch from langchain.retrievers import ElasticSearchBM25Retriever Create New Retriever# elasticsearch_url="http://localhost:9200" retriever = ElasticSearchBM25Retriever.create(elasticsearch_url, "langchain-index-4") # Alternatively, you can load an existing index # import elasticsearch # elasticsearch_url="http://localhost:9200"
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/elastic_search_bm25.html
a16d2d34b47f-1
# import elasticsearch # elasticsearch_url="http://localhost:9200" # retriever = ElasticSearchBM25Retriever(elasticsearch.Elasticsearch(elasticsearch_url), "langchain-index") Add texts (if necessary)# We can optionally add texts to the retriever (if they aren’t already in there) retriever.add_texts(["foo", "bar", "world", "hello", "foo bar"]) ['cbd4cb47-8d9f-4f34-b80e-ea871bc49856', 'f3bd2e24-76d1-4f9b-826b-ec4c0e8c7365', '8631bfc8-7c12-48ee-ab56-8ad5f373676e', '8be8374c-3253-4d87-928d-d73550a2ecf0', 'd79f457b-2842-4eab-ae10-77aa420b53d7'] Use Retriever# We can now use the retriever! result = retriever.get_relevant_documents("foo") result [Document(page_content='foo', metadata={}), Document(page_content='foo bar', metadata={})] previous Databerry next kNN Contents Create New Retriever Add texts (if necessary) Use Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/elastic_search_bm25.html
fec7d90c114b-0
.ipynb .pdf Time Weighted VectorStore Contents Low Decay Rate High Decay Rate Virtual Time Time Weighted VectorStore# This retriever uses a combination of semantic similarity and a time decay. The algorithm for scoring them is: semantic_similarity + (1.0 - decay_rate) ** hours_passed Notably, hours_passed refers to the hours passed since the object in the retriever was last accessed, not since it was created. This means that frequently accessed objects remain “fresh.” import faiss from datetime import datetime, timedelta from langchain.docstore import InMemoryDocstore from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers import TimeWeightedVectorStoreRetriever from langchain.schema import Document from langchain.vectorstores import FAISS Low Decay Rate# A low decay rate (in this case, to be extreme, we will set it close to 0) means memories will be “remembered” for longer. A decay rate of 0 means memories will never be forgotten, making this retriever equivalent to the vector lookup. # Define your embedding model embeddings_model = OpenAIEmbeddings() # Initialize the vectorstore as empty embedding_size = 1536 index = faiss.IndexFlatL2(embedding_size) vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.0000000000000000000000001, k=1) yesterday = datetime.now() - timedelta(days=1) retriever.add_documents([Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]) retriever.add_documents([Document(page_content="hello foo")])
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html
fec7d90c114b-1
retriever.add_documents([Document(page_content="hello foo")]) ['d7f85756-2371-4bdf-9140-052780a0f9b3'] # "Hello World" is returned first because it is most salient, and the decay rate is close to 0., meaning it's still recent enough retriever.get_relevant_documents("hello world") [Document(page_content='hello world', metadata={'last_accessed_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 678341), 'created_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 279596), 'buffer_idx': 0})] High Decay Rate# With a high decay rate (e.g., several 9’s), the recency score quickly goes to 0! If you set this all the way to 1, recency is 0 for all objects, once again making this equivalent to a vector lookup. # Define your embedding model embeddings_model = OpenAIEmbeddings() # Initialize the vectorstore as empty embedding_size = 1536 index = faiss.IndexFlatL2(embedding_size) vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.999, k=1) yesterday = datetime.now() - timedelta(days=1) retriever.add_documents([Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]) retriever.add_documents([Document(page_content="hello foo")]) ['40011466-5bbe-4101-bfd1-e22e7f505de2']
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html
fec7d90c114b-2
# "Hello Foo" is returned first because "hello world" is mostly forgotten retriever.get_relevant_documents("hello world") [Document(page_content='hello foo', metadata={'last_accessed_at': datetime.datetime(2023, 4, 16, 22, 9, 2, 494798), 'created_at': datetime.datetime(2023, 4, 16, 22, 9, 2, 178722), 'buffer_idx': 1})] Virtual Time# Using some utils in LangChain, you can mock out the time component from langchain.utils import mock_now import datetime # Notice the last access time is that date time with mock_now(datetime.datetime(2011, 2, 3, 10, 11)): print(retriever.get_relevant_documents("hello world")) [Document(page_content='hello world', metadata={'last_accessed_at': MockDateTime(2011, 2, 3, 10, 11), 'created_at': datetime.datetime(2023, 5, 13, 21, 0, 27, 279596), 'buffer_idx': 0})] previous TF-IDF next VectorStore Contents Low Decay Rate High Decay Rate Virtual Time By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/time_weighted_vectorstore.html
b7a9c4399cc4-0
.ipynb .pdf Cohere Reranker Contents Set up the base vector store retriever Doing reranking with CohereRerank Cohere Reranker# Cohere is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions. This notebook shows how to use Cohere’s rerank endpoint in a retriever. This builds on top of ideas in the ContextualCompressionRetriever. #!pip install cohere #!pip install faiss # OR (depending on Python version) #!pip install faiss-cpu # get a new token: https://dashboard.cohere.ai/ import os import getpass os.environ['COHERE_API_KEY'] = getpass.getpass('Cohere API Key:') os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') # Helper function for printing docs def pretty_print_docs(docs): print(f"\n{'-' * 100}\n".join([f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)])) Set up the base vector store retriever# Let’s start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs. from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.document_loaders import TextLoader from langchain.vectorstores import FAISS documents = TextLoader('../../../state_of_the_union.txt').load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents)
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-1
texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever(search_kwargs={"k": 20}) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) Document 1: One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. ---------------------------------------------------------------------------------------------------- Document 2: As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. ---------------------------------------------------------------------------------------------------- Document 3: A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. ---------------------------------------------------------------------------------------------------- Document 4: He met the Ukrainian people. From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-2
Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. ---------------------------------------------------------------------------------------------------- Document 5: I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. I’ve worked on these issues a long time. I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. So let’s not abandon our streets. Or choose between safety and equal justice. ---------------------------------------------------------------------------------------------------- Document 6: Vice President Harris and I ran for office with a new economic vision for America. Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up and the middle out, not from the top down. Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. America used to have the best roads, bridges, and airports on Earth. Now our infrastructure is ranked 13th in the world. ---------------------------------------------------------------------------------------------------- Document 7: And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. By the end of this year, the deficit will be down to less than half what it was before I took office. The only president ever to cut the deficit by more than one trillion dollars in a single year. Lowering your costs also means demanding more competition. 
I’m a capitalist, but capitalism without competition isn’t capitalism. It’s exploitation—and it drives up prices.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-3
It’s exploitation—and it drives up prices. ---------------------------------------------------------------------------------------------------- Document 8: For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. Vice President Harris and I ran for office with a new economic vision for America. ---------------------------------------------------------------------------------------------------- Document 9: All told, we created 369,000 new manufacturing jobs in America just last year. Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” It’s time. But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. ---------------------------------------------------------------------------------------------------- Document 10: I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. And fourth, let’s end cancer as we know it. This is personal to me and Jill, to Kamala, and to so many of you. Cancer is the #2 cause of death in America–second only to heart disease. ---------------------------------------------------------------------------------------------------- Document 11: He will never extinguish their love of freedom. He will never weaken the resolve of the free world. We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. The pandemic has been punishing.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-4
The pandemic has been punishing. And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. I understand. ---------------------------------------------------------------------------------------------------- Document 12: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. Last year COVID-19 kept us apart. This year we are finally together again. Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. With a duty to one another to the American people to the Constitution. And with an unwavering resolve that freedom will always triumph over tyranny. ---------------------------------------------------------------------------------------------------- Document 13: I know. One of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can. Committed to military families like Danielle Robinson from Ohio. The widow of Sergeant First Class Heath Robinson. He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. ---------------------------------------------------------------------------------------------------- Document 14: And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. First, beat the opioid epidemic. There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. ---------------------------------------------------------------------------------------------------- Document 15: Third, support our veterans. 
Veterans are the best of us.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-5
Third, support our veterans. Veterans are the best of us. I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. Our troops in Iraq and Afghanistan faced many dangers. ---------------------------------------------------------------------------------------------------- Document 16: When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. And I know you’re tired, frustrated, and exhausted. But I also know this. ---------------------------------------------------------------------------------------------------- Document 17: Now is the hour. Our moment of responsibility. Our test of resolve and conscience, of history itself. It is in this moment that our character is formed. Our purpose is found. Our future is forged. Well I know this nation. We will meet the test. To protect freedom and liberty, to expand fairness and opportunity. We will save democracy. As hard as these times have been, I am more optimistic about America today than I have been my whole life. ---------------------------------------------------------------------------------------------------- Document 18: He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are. The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. 
---------------------------------------------------------------------------------------------------- Document 19:
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-6
---------------------------------------------------------------------------------------------------- Document 19: I understand. I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. That’s why one of the first things I did as President was fight to pass the American Rescue Plan. Because people were hurting. We needed to act, and we did. Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. ---------------------------------------------------------------------------------------------------- Document 20: So let’s not abandon our streets. Or choose between safety and equal justice. Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. Doing reranking with CohereRerank# Now let’s wrap our base retriever with a ContextualCompressionRetriever. We’ll add a CohereRerank, which uses the Cohere rerank endpoint to rerank the returned results. from langchain.llms import OpenAI from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import CohereRerank llm = OpenAI(temperature=0) compressor = CohereRerank() compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever) compressed_docs = compression_retriever.get_relevant_documents("What did the president say about Ketanji Jackson Brown") pretty_print_docs(compressed_docs) Document 1: One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-7
And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. ---------------------------------------------------------------------------------------------------- Document 2: I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. I’ve worked on these issues a long time. I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. So let’s not abandon our streets. Or choose between safety and equal justice. ---------------------------------------------------------------------------------------------------- Document 3: A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. You can of course use this retriever within a QA pipeline from langchain.chains import RetrievalQA chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), retriever=compression_retriever) chain({"query": query}) {'query': 'What did the president say about Ketanji Brown Jackson', 'result': " The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she is a consensus builder who has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans."} previous Self-querying with Chroma next Contextual Compression Contents
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
b7a9c4399cc4-8
previous Self-querying with Chroma next Contextual Compression Contents Set up the base vector store retriever Doing reranking with CohereRerank By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/cohere-reranker.html
fd71ef51b801-0
.ipynb .pdf PubMed Retriever PubMed Retriever# This notebook goes over how to use PubMed as a retriever PubMed® comprises more than 35 million citations for biomedical literature from MEDLINE, life science journals, and online books. Citations may include links to full text content from PubMed Central and publisher web sites. from langchain.retrievers import PubMedRetriever retriever = PubMedRetriever() retriever.get_relevant_documents("chatgpt") [Document(page_content='', metadata={'uid': '37268021', 'title': 'Dermatology in the wake of an AI revolution: who gets a say?', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>31</Day>'}), Document(page_content='', metadata={'uid': '37267643', 'title': 'What is ChatGPT and what do we do with it? Implications of the age of AI for nursing and midwifery practice and education: An editorial.', 'pub_date': '<Year>2023</Year><Month>May</Month><Day>30</Day>'}), Document(page_content='The nursing field has undergone notable changes over time and is projected to undergo further modifications in the future, owing to the advent of sophisticated technologies and growing healthcare needs. The advent of ChatGPT, an AI-powered language model, is expected to exert a significant influence on the nursing profession, specifically in the domains of patient care and instruction. The present article delves into the ramifications of ChatGPT within the nursing domain and accentuates its capacity and constraints to transform the discipline.', metadata={'uid': '37266721', 'title': 'The Impact of ChatGPT on the Nursing Profession: Revolutionizing Patient Care and Education.', 'pub_date': '<Year>2023</Year><Month>Jun</Month><Day>02</Day>'})]
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pubmed.html
fd71ef51b801-1
previous Pinecone Hybrid Search next Self-querying with Qdrant By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pubmed.html
58a9730fb62e-0
.ipynb .pdf AWS Kendra Contents Using the AWS Kendra Index Retriever AWS Kendra# AWS Kendra is an intelligent search service provided by Amazon Web Services (AWS). It utilizes advanced natural language processing (NLP) and machine learning algorithms to enable powerful search capabilities across various data sources within an organization. Kendra is designed to help users find the information they need quickly and accurately, improving productivity and decision-making. With Kendra, users can search across a wide range of content types, including documents, FAQs, knowledge bases, manuals, and websites. It supports multiple languages and can understand complex queries, synonyms, and contextual meanings to provide highly relevant search results. Using the AWS Kendra Index Retriever# #!pip install boto3 import boto3 from langchain.retrievers import AwsKendraIndexRetriever Create New Retriever kclient = boto3.client('kendra', region_name="us-east-1") retriever = AwsKendraIndexRetriever( kclient=kclient, kendraindex="kendraindex", ) Now you can use retrieved documents from AWS Kendra Index retriever.get_relevant_documents("what is langchain") previous Arxiv next Azure Cognitive Search Contents Using the AWS Kendra Index Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/aws_kendra_index_retriever.html
2b8d91c15960-0
.ipynb .pdf SVM Contents Create New Retriever with Texts Use Retriever SVM# Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection. This notebook goes over how to use a retriever that under the hood uses an SVM using scikit-learn package. Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb #!pip install scikit-learn #!pip install lark We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. import os import getpass os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') from langchain.retrievers import SVMRetriever from langchain.embeddings import OpenAIEmbeddings Create New Retriever with Texts# retriever = SVMRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"], OpenAIEmbeddings()) Use Retriever# We can now use the retriever! result = retriever.get_relevant_documents("foo") result [Document(page_content='foo', metadata={}), Document(page_content='foo bar', metadata={}), Document(page_content='hello', metadata={}), Document(page_content='world', metadata={})] previous Self-querying next TF-IDF Contents Create New Retriever with Texts Use Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/svm.html
6fdfbdafed2d-0
.ipynb .pdf kNN Contents Create New Retriever with Texts Use Retriever kNN# In statistics, the k-nearest neighbors algorithm (k-NN) is a non-parametric supervised learning method first developed by Evelyn Fix and Joseph Hodges in 1951, and later expanded by Thomas Cover. It is used for classification and regression. This notebook goes over how to use a retriever that under the hood uses an kNN. Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb from langchain.retrievers import KNNRetriever from langchain.embeddings import OpenAIEmbeddings Create New Retriever with Texts# retriever = KNNRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"], OpenAIEmbeddings()) Use Retriever# We can now use the retriever! result = retriever.get_relevant_documents("foo") result [Document(page_content='foo', metadata={}), Document(page_content='foo bar', metadata={}), Document(page_content='hello', metadata={}), Document(page_content='bar', metadata={})] previous ElasticSearch BM25 next LOTR (Merger Retriever) Contents Create New Retriever with Texts Use Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/knn.html
7ebc8806f552-0
.ipynb .pdf Azure Cognitive Search Contents Set up Azure Cognitive Search Using the Azure Cognitive Search Retriever Azure Cognitive Search# Azure Cognitive Search (formerly known as Azure Search) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications. Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you’ll work with the following capabilities: A search engine for full text search over a search index containing user-owned content Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more Programmability through REST APIs and client libraries in Azure SDKs Azure integration at the data layer, machine learning layer, and AI (Cognitive Services) This notebook shows how to use Azure Cognitive Search (ACS) within LangChain. Set up Azure Cognitive Search# To set up ACS, please follow the instructions here. Please note the name of your ACS service, the name of your ACS index, your API key. Your API key can be either Admin or Query key, but as we only read data it is recommended to use a Query key. Using the Azure Cognitive Search Retriever# import os from langchain.retrievers import AzureCognitiveSearchRetriever Set Service Name, Index Name and API key as environment variables (alternatively, you can pass them as arguments to AzureCognitiveSearchRetriever). os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<YOUR_ACS_SERVICE_NAME>" os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] ="<YOUR_ACS_INDEX_NAME>"
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/azure_cognitive_search.html
7ebc8806f552-1
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<YOUR_API_KEY>" Create the Retriever retriever = AzureCognitiveSearchRetriever(content_key="content") Now you can retrieve documents from Azure Cognitive Search retriever.get_relevant_documents("what is langchain") previous AWS Kendra next ChatGPT Plugin Contents Set up Azure Cognitive Search Using the Azure Cognitive Search Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/azure_cognitive_search.html
8910a72533a0-0
.ipynb .pdf TF-IDF Contents Create New Retriever with Texts Create a New Retriever with Documents Use Retriever TF-IDF# TF-IDF means term-frequency times inverse document-frequency. This notebook goes over how to use a retriever that under the hood uses TF-IDF using scikit-learn package. For more information on the details of TF-IDF see this blog post. # !pip install scikit-learn from langchain.retrievers import TFIDFRetriever Create New Retriever with Texts# retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"]) Create a New Retriever with Documents# You can now create a new retriever with the documents you created. from langchain.schema import Document retriever = TFIDFRetriever.from_documents([Document(page_content="foo"), Document(page_content="bar"), Document(page_content="world"), Document(page_content="hello"), Document(page_content="foo bar")]) Use Retriever# We can now use the retriever! result = retriever.get_relevant_documents("foo") result [Document(page_content='foo', metadata={}), Document(page_content='foo bar', metadata={}), Document(page_content='hello', metadata={}), Document(page_content='world', metadata={})] previous SVM next Time Weighted VectorStore Contents Create New Retriever with Texts Create a New Retriever with Documents Use Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/tf_idf.html
9221c2b71a88-0
.ipynb .pdf Vespa Vespa# Vespa is a fully featured search engine and vector database. It supports vector search (ANN), lexical search, and search in structured data, all in the same query. This notebook shows how to use Vespa.ai as a LangChain retriever. In order to create a retriever, we use pyvespa to create a connection to a Vespa service. #!pip install pyvespa from vespa.application import Vespa vespa_app = Vespa(url="https://doc-search.vespa.oath.cloud") This creates a connection to a Vespa service, here the Vespa documentation search service. Using pyvespa package, you can also connect to a Vespa Cloud instance or a local Docker instance. After connecting to the service, you can set up the retriever: from langchain.retrievers.vespa_retriever import VespaRetriever vespa_query_body = { "yql": "select content from paragraph where userQuery()", "hits": 5, "ranking": "documentation", "locale": "en-us" } vespa_content_field = "content" retriever = VespaRetriever(vespa_app, vespa_query_body, vespa_content_field) This sets up a LangChain retriever that fetches documents from the Vespa application. Here, up to 5 results are retrieved from the content field in the paragraph document type, using documentation as the ranking method. The userQuery() is replaced with the actual query passed from LangChain. Please refer to the pyvespa documentation for more information. Now you can return the results and continue using the results in LangChain. retriever.get_relevant_documents("what is vespa?")
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vespa.html
9221c2b71a88-1
retriever.get_relevant_documents("what is vespa?") previous VectorStore next Weaviate Hybrid Search By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vespa.html
440a546753af-0
.ipynb .pdf Zep Contents Retriever Example Initialize the Zep Chat Message History Class and add a chat message history to the memory store Use the Zep Retriever to vector search over the Zep memory Zep# Zep - A long-term memory store for LLM applications. More on Zep: Zep stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs. Key Features: Long-term memory persistence, with access to historical messages irrespective of your summarization strategy. Auto-summarization of memory messages based on a configurable message window. A series of summaries are stored, providing flexibility for future summarization strategies. Vector search over memories, with messages automatically embedded on creation. Auto-token counting of memories and summaries, allowing finer-grained control over prompt assembly. Python and JavaScript SDKs. Zep’s Go Extractor model is easily extensible, with a simple, clean interface available to build new enrichment functionality, such as summarizers, entity extractors, embedders, and more. Zep project: getzep/zep Retriever Example# This notebook demonstrates how to search historical chat message histories using the Zep Long-term Memory Store. We’ll demonstrate: Adding conversation history to the Zep memory store. Vector search over the conversation history. from langchain.memory.chat_message_histories import ZepChatMessageHistory from langchain.schema import HumanMessage, AIMessage from uuid import uuid4 # Set this to your Zep server URL ZEP_API_URL = "http://localhost:8000" Initialize the Zep Chat Message History Class and add a chat message history to the memory store#
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-1
Initialize the Zep Chat Message History Class and add a chat message history to the memory store# NOTE: Unlike other Retrievers, the content returned by the Zep Retriever is session/user specific. A session_id is required when instantiating the Retriever. session_id = str(uuid4()) # This is a unique identifier for the user/session # Set up Zep Chat History. We'll use this to add chat histories to the memory store zep_chat_history = ZepChatMessageHistory( session_id=session_id, url=ZEP_API_URL, ) # Preload some messages into the memory. The default message window is 12 messages. We want to push beyond this to demonstrate auto-summarization. test_history = [ {"role": "human", "content": "Who was Octavia Butler?"}, { "role": "ai", "content": ( "Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American" " science fiction author." ), }, {"role": "human", "content": "Which books of hers were made into movies?"}, { "role": "ai", "content": ( "The most well-known adaptation of Octavia Butler's work is the FX series" " Kindred, based on her novel of the same name." ), }, {"role": "human", "content": "Who were her contemporaries?"}, { "role": "ai", "content": ( "Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R." " Delany, and Joanna Russ." ), },
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-2
" Delany, and Joanna Russ." ), }, {"role": "human", "content": "What awards did she win?"}, { "role": "ai", "content": ( "Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur" " Fellowship." ), }, { "role": "human", "content": "Which other women sci-fi writers might I want to read?", }, { "role": "ai", "content": "You might want to read Ursula K. Le Guin or Joanna Russ.", }, { "role": "human", "content": ( "Write a short synopsis of Butler's book, Parable of the Sower. What is it" " about?" ), }, { "role": "ai", "content": ( "Parable of the Sower is a science fiction novel by Octavia Butler," " published in 1993. It follows the story of Lauren Olamina, a young woman" " living in a dystopian future where society has collapsed due to" " environmental disasters, poverty, and violence." ), }, ] for msg in test_history: zep_chat_history.append( HumanMessage(content=msg["content"]) if msg["role"] == "human" else AIMessage(content=msg["content"]) ) Use the Zep Retriever to vector search over the Zep memory# Zep provides native vector search over historical conversation memory. Embedding happens automatically.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-3
Zep provides native vector search over historical conversation memory. Embedding happens automatically. NOTE: Embedding of messages occurs asynchronously, so the first query may not return results. Subsequent queries will return results as the embeddings are generated. from langchain.retrievers import ZepRetriever zep_retriever = ZepRetriever( session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever url=ZEP_API_URL, top_k=5, ) await zep_retriever.aget_relevant_documents("Who wrote Parable of the Sower?") [Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759001673780126, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}), Document(page_content="Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.", metadata={'score': 0.7602262941130749, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-4
Document(page_content='Who were her contemporaries?', metadata={'score': 0.757553366415519, 'uuid': '41f9c41a-a205-41e1-b48b-a0a4cd943fc8', 'created_at': '2023-05-25T15:03:30.243995Z', 'role': 'human', 'token_count': 8}), Document(page_content='Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American science fiction author.', metadata={'score': 0.7546211059317948, 'uuid': '34678311-0098-4f1a-8fd4-5615ac692deb', 'created_at': '2023-05-25T15:03:30.231427Z', 'role': 'ai', 'token_count': 31}), Document(page_content='Which books of hers were made into movies?', metadata={'score': 0.7496714959247069, 'uuid': '18046c3a-9666-4d3e-b4f0-43d1394732b7', 'created_at': '2023-05-25T15:03:30.236837Z', 'role': 'human', 'token_count': 11})] We can also use the Zep sync API to retrieve results: zep_retriever.get_relevant_documents("Who wrote Parable of the Sower?")
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-5
[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897321402776546, 'uuid': '1c09603a-52c1-40d7-9d69-29f26256029c', 'created_at': '2023-05-25T15:03:30.268257Z', 'role': 'ai', 'token_count': 56}), Document(page_content="Write a short synopsis of Butler's book, Parable of the Sower. What is it about?", metadata={'score': 0.8857628682610436, 'uuid': 'f6706e8c-6c91-452f-8c1b-9559fd924657', 'created_at': '2023-05-25T15:03:30.265302Z', 'role': 'human', 'token_count': 23}), Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759670375149477, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
440a546753af-6
Document(page_content="Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.", metadata={'score': 0.7602854653476563, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}), Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7595293992240313, 'uuid': 'f22f2498-6118-4c74-8718-aa89ccd7e3d6', 'created_at': '2023-05-25T15:03:30.261198Z', 'role': 'ai', 'token_count': 18})] previous Wikipedia next Chains Contents Retriever Example Initialize the Zep Chat Message History Class and add a chat message history to the memory store Use the Zep Retriever to vector search over the Zep memory By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/zep_memorystore.html
15c22005e0bd-0
.ipynb .pdf Self-querying with Qdrant Contents Creating a Qdrant vectorstore Creating our self-querying retriever Testing it out Filter k Self-querying with Qdrant# Qdrant (read: quadrant ) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. Qdrant is tailored to extended filtering support, which makes it useful. In the notebook we’ll demo the SelfQueryRetriever wrapped around a Qdrant vector store. Creating a Qdrant vectorstore# First we’ll want to create a Qdrant VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies. NOTE: The self-query retriever requires you to have lark installed (pip install lark). We also need the qdrant-client package. #!pip install lark qdrant-client We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. # import os # import getpass # os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') from langchain.schema import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Qdrant embeddings = OpenAIEmbeddings() docs = [ Document(page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}), Document(page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
15c22005e0bd-1
Document(page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}), Document(page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}), Document(page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}), Document(page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={"year": 1979, "rating": 9.9, "director": "Andrei Tarkovsky", "genre": "science fiction", "rating": 9.9}) ] vectorstore = Qdrant.from_documents( docs, embeddings, location=":memory:", # Local mode with in-memory storage only collection_name="my_documents", ) Creating our self-querying retriever# Now we can instantiate our retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents. from langchain.llms import OpenAI from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.chains.query_constructor.base import AttributeInfo metadata_field_info=[ AttributeInfo( name="genre", description="The genre of the movie", type="string or list[string]", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
15c22005e0bd-2
type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm = OpenAI(temperature=0) retriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True) Testing it out# And now we can try actually using our retriever! # This example only specifies a relevant query retriever.get_relevant_documents("What are some movies about dinosaurs") query='dinosaur' filter=None limit=None [Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}), Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}), Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6})] # This example only specifies a filter retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5")
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
15c22005e0bd-3
query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) limit=None [Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6})] # This example specifies a query and a filter retriever.get_relevant_documents("Has Greta Gerwig directed any movies about women") query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None [Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3})] # This example specifies a composite filter retriever.get_relevant_documents("What's a highly rated (above 8.5) science fiction film?") query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction')]) limit=None
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
15c22005e0bd-4
[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})] # This example specifies a query and composite filter retriever.get_relevant_documents("What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated") query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')]) limit=None [Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})] Filter k# We can also use the self query retriever to specify k: the number of documents to fetch. We can do this by passing enable_limit=True to the constructor. retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, enable_limit=True, verbose=True ) # This example only specifies a relevant query retriever.get_relevant_documents("what are two movies about dinosaurs") query='dinosaur' filter=None limit=2 [Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
15c22005e0bd-5
Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})] previous PubMed Retriever next Self-querying Contents Creating a Qdrant vectorstore Creating our self-querying retriever Testing it out Filter k By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/qdrant_self_query.html
7751380b00c9-0
.ipynb .pdf ChatGPT Plugin Contents Using the ChatGPT Retriever Plugin ChatGPT Plugin# OpenAI plugins connect ChatGPT to third-party applications. These plugins enable ChatGPT to interact with APIs defined by developers, enhancing ChatGPT’s capabilities and allowing it to perform a wide range of actions. Plugins can allow ChatGPT to do things like: Retrieve real-time information; e.g., sports scores, stock prices, the latest news, etc. Retrieve knowledge-base information; e.g., company docs, personal notes, etc. Perform actions on behalf of the user; e.g., booking a flight, ordering food, etc. This notebook shows how to use the ChatGPT Retriever Plugin within LangChain. # STEP 1: Load # Load documents using LangChain's DocumentLoaders # This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html from langchain.document_loaders.csv_loader import CSVLoader loader = CSVLoader(file_path='../../document_loaders/examples/example_data/mlb_teams_2012.csv') data = loader.load() # STEP 2: Convert # Convert Document to format expected by https://github.com/openai/chatgpt-retrieval-plugin from typing import List from langchain.docstore.document import Document import json def write_json(path: str, documents: List[Document])-> None: results = [{"text": doc.page_content} for doc in documents] with open(path, "w") as f: json.dump(results, f, indent=2) write_json("foo.json", data) # STEP 3: Use # Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html
7751380b00c9-1
Using the ChatGPT Retriever Plugin# Okay, so we’ve created the ChatGPT Retriever Plugin, but how do we actually use it? The below code walks through how to do that. We want to use ChatGPTPluginRetriever so we have to get the OpenAI API Key. import os import getpass os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') from langchain.retrievers import ChatGPTPluginRetriever retriever = ChatGPTPluginRetriever(url="http://0.0.0.0:8000", bearer_token="foo") retriever.get_relevant_documents("alice's phone number") [Document(page_content="This is Alice's phone number: 123-456-7890", lookup_str='', metadata={'id': '456_0', 'metadata': {'source': 'email', 'source_id': '567', 'url': None, 'created_at': '1609592400.0', 'author': 'Alice', 'document_id': '456'}, 'embedding': None, 'score': 0.925571561}, lookup_index=0), Document(page_content='This is a document about something', lookup_str='', metadata={'id': '123_0', 'metadata': {'source': 'file', 'source_id': 'https://example.com/doc1', 'url': 'https://example.com/doc1', 'created_at': '1609502400.0', 'author': 'Alice', 'document_id': '123'}, 'embedding': None, 'score': 0.6987589}, lookup_index=0),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html
7751380b00c9-2
Document(page_content='Team: Angels "Payroll (millions)": 154.49 "Wins": 89', lookup_str='', metadata={'id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631_0', 'metadata': {'source': None, 'source_id': None, 'url': None, 'created_at': None, 'author': None, 'document_id': '59c2c0c1-ae3f-4272-a1da-f44a723ea631'}, 'embedding': None, 'score': 0.697888613}, lookup_index=0)] previous Azure Cognitive Search next Self-querying with Chroma Contents Using the ChatGPT Retriever Plugin By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chatgpt-plugin.html
989b78f99164-0
.ipynb .pdf Contextual Compression Contents Contextual Compression Using a vanilla vector store retriever Adding contextual compression with an LLMChainExtractor More built-in compressors: filters LLMChainFilter EmbeddingsFilter Stringing compressors and document transformers together Contextual Compression# This notebook introduces the concept of DocumentCompressors and the ContextualCompressionRetriever. The core idea is simple: given a specific query, we should be able to return only the documents relevant to that query, and only the parts of those documents that are relevant. The ContextualCompressionRetriever is a wrapper for another retriever that iterates over the initial output of the base retriever and filters and compresses those initial documents, so that only the most relevant information is returned. # Helper function for printing docs def pretty_print_docs(docs): print(f"\n{'-' * 100}\n".join([f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)])) Using a vanilla vector store retriever# Let’s start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can see that given an example question our retriever returns one or two relevant docs and a few irrelevant docs. And even the relevant docs have a lot of irrelevant information in them. from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.document_loaders import TextLoader from langchain.vectorstores import FAISS documents = TextLoader('../../../state_of_the_union.txt').load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents)
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-1
texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever() docs = retriever.get_relevant_documents("What did the president say about Ketanji Brown Jackson") pretty_print_docs(docs) Document 1: Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. ---------------------------------------------------------------------------------------------------- Document 2: A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-2
We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. ---------------------------------------------------------------------------------------------------- Document 3: And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. First, beat the opioid epidemic. ---------------------------------------------------------------------------------------------------- Document 4: Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. That ends on my watch. Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. 
We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-3
Let’s pass the Paycheck Fairness Act and paid leave. Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. Adding contextual compression with an LLMChainExtractor# Now let’s wrap our base retriever with a ContextualCompressionRetriever. We’ll add an LLMChainExtractor, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query. from langchain.llms import OpenAI from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import LLMChainExtractor llm = OpenAI(temperature=0) compressor = LLMChainExtractor.from_llm(llm) compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever) compressed_docs = compression_retriever.get_relevant_documents("What did the president say about Ketanji Jackson Brown") pretty_print_docs(compressed_docs) Document 1: "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence." ---------------------------------------------------------------------------------------------------- Document 2:
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-4
---------------------------------------------------------------------------------------------------- Document 2: "A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans." More built-in compressors: filters# LLMChainFilter# The LLMChainFilter is a slightly simpler but more robust compressor that uses an LLM chain to decide which of the initially retrieved documents to filter out and which ones to return, without manipulating the document contents. from langchain.retrievers.document_compressors import LLMChainFilter _filter = LLMChainFilter.from_llm(llm) compression_retriever = ContextualCompressionRetriever(base_compressor=_filter, base_retriever=retriever) compressed_docs = compression_retriever.get_relevant_documents("What did the president say about Ketanji Jackson Brown") pretty_print_docs(compressed_docs) Document 1: Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. EmbeddingsFilter#
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-5
EmbeddingsFilter# Making an extra LLM call over each retrieved document is expensive and slow. The EmbeddingsFilter provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query. from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.document_compressors import EmbeddingsFilter embeddings = OpenAIEmbeddings() embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76) compression_retriever = ContextualCompressionRetriever(base_compressor=embeddings_filter, base_retriever=retriever) compressed_docs = compression_retriever.get_relevant_documents("What did the president say about Ketanji Jackson Brown") pretty_print_docs(compressed_docs) Document 1: Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. ---------------------------------------------------------------------------------------------------- Document 2:
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-6
---------------------------------------------------------------------------------------------------- Document 2: A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. ---------------------------------------------------------------------------------------------------- Document 3: And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. 
First, beat the opioid epidemic. Stringing compressors and document transformers together#
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-7
First, beat the opioid epidemic. Stringing compressors and document transformers together# Using the DocumentCompressorPipeline we can also easily combine multiple compressors in sequence. Along with compressors we can add BaseDocumentTransformers to our pipeline, which don’t perform any contextual compression but simply perform some transformation on a set of documents. For example TextSplitters can be used as document transformers to split documents into smaller pieces, and the EmbeddingsRedundantFilter can be used to filter out redundant documents based on embedding similarity between documents. Below we create a compressor pipeline by first splitting our docs into smaller chunks, then removing redundant documents, and then filtering based on relevance to the query. from langchain.document_transformers import EmbeddingsRedundantFilter from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain.text_splitter import CharacterTextSplitter splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ") redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76) pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter] ) compression_retriever = ContextualCompressionRetriever(base_compressor=pipeline_compressor, base_retriever=retriever) compressed_docs = compression_retriever.get_relevant_documents("What did the president say about Ketanji Jackson Brown") pretty_print_docs(compressed_docs) Document 1: One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson ---------------------------------------------------------------------------------------------------- Document 2:
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
989b78f99164-8
---------------------------------------------------------------------------------------------------- Document 2: As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year ---------------------------------------------------------------------------------------------------- Document 3: A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder previous Cohere Reranker next Databerry Contents Contextual Compression Using a vanilla vector store retriever Adding contextual compression with an LLMChainExtractor More built-in compressors: filters LLMChainFilter EmbeddingsFilter Stringing compressors and document transformers together By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/contextual-compression.html
d7797d46d7af-0
.ipynb .pdf Pinecone Hybrid Search Contents Setup Pinecone Get embeddings and sparse encoders Load Retriever Add texts (if necessary) Use Retriever Pinecone Hybrid Search# Pinecone is a vector database with broad functionality. This notebook goes over how to use a retriever that under the hood uses Pinecone and Hybrid Search. The logic of this retriever is taken from this documentation To use Pinecone, you must have an API key and an Environment. Here are the installation instructions. #!pip install pinecone-client pinecone-text import os import getpass os.environ['PINECONE_API_KEY'] = getpass.getpass('Pinecone API Key:') from langchain.retrievers import PineconeHybridSearchRetriever os.environ['PINECONE_ENVIRONMENT'] = getpass.getpass('Pinecone Environment:') We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') Setup Pinecone# You should only have to do this part once. Note: it’s important to make sure that the “context” field that holds the document text in the metadata is not indexed. Currently you need to specify explicitly the fields you do want to index. For more information check out Pinecone’s docs. import os import pinecone api_key = os.getenv("PINECONE_API_KEY") or "PINECONE_API_KEY" # find environment next to your API key in the Pinecone console env = os.getenv("PINECONE_ENVIRONMENT") or "PINECONE_ENVIRONMENT" index_name = "langchain-pinecone-hybrid-search" pinecone.init(api_key=api_key, environment=env)
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html
d7797d46d7af-1
pinecone.init(api_key=api_key, environment=env) pinecone.whoami() WhoAmIResponse(username='load', user_label='label', projectname='load-test') # create the index pinecone.create_index( name = index_name, dimension = 1536, # dimensionality of dense model metric = "dotproduct", # sparse values supported only for dotproduct pod_type = "s1", metadata_config={"indexed": []} # see explanation above ) Now that it’s created, we can use it index = pinecone.Index(index_name) Get embeddings and sparse encoders# Embeddings are used for the dense vectors, tokenizer is used for the sparse vector from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() To encode the text to sparse values you can either choose SPLADE or BM25. For out of domain tasks we recommend using BM25. For more information about the sparse encoders you can check out pinecone-text library docs. from pinecone_text.sparse import BM25Encoder # or from pinecone_text.sparse import SpladeEncoder if you wish to work with SPLADE # use default tf-idf values bm25_encoder = BM25Encoder().default() The above code is using default tf-idf values. It’s highly recommended to fit the tf-idf values to your own corpus. You can do it as follows: corpus = ["foo", "bar", "world", "hello"] # fit tf-idf values on your corpus bm25_encoder.fit(corpus) # store the values to a json file bm25_encoder.dump("bm25_values.json") # load to your BM25Encoder object bm25_encoder = BM25Encoder().load("bm25_values.json") Load Retriever#
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html
d7797d46d7af-2
Load Retriever# We can now construct the retriever! retriever = PineconeHybridSearchRetriever(embeddings=embeddings, sparse_encoder=bm25_encoder, index=index) Add texts (if necessary)# We can optionally add texts to the retriever (if they aren’t already in there) retriever.add_texts(["foo", "bar", "world", "hello"]) 100%|██████████| 1/1 [00:02<00:00, 2.27s/it] Use Retriever# We can now use the retriever! result = retriever.get_relevant_documents("foo") result[0] Document(page_content='foo', metadata={}) previous Metal next PubMed Retriever Contents Setup Pinecone Get embeddings and sparse encoders Load Retriever Add texts (if necessary) Use Retriever By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/pinecone_hybrid_search.html
214c45ef31c4-0
.ipynb .pdf VectorStore Contents Maximum Marginal Relevance Retrieval Similarity Score Threshold Retrieval Specifying top k VectorStore# The index - and therefore the retriever - that LangChain has the most support for is the VectorStoreRetriever. As the name suggests, this retriever is backed heavily by a VectorStore. Once you construct a VectorStore, it’s very easy to construct a retriever. Let’s walk through an example. from langchain.document_loaders import TextLoader loader = TextLoader('../../../state_of_the_union.txt') from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import FAISS from langchain.embeddings import OpenAIEmbeddings documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = FAISS.from_documents(texts, embeddings) Exiting: Cleaning up .chroma directory retriever = db.as_retriever() docs = retriever.get_relevant_documents("what did he say about ketanji brown jackson") Maximum Marginal Relevance Retrieval# By default, the vectorstore retriever uses similarity search. If the underlying vectorstore supports maximum marginal relevance search, you can specify that as the search type. retriever = db.as_retriever(search_type="mmr") docs = retriever.get_relevant_documents("what did he say about ketanji brown jackson") Similarity Score Threshold Retrieval# You can also use a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold retriever = db.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .5})
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vectorstore.html
214c45ef31c4-1
docs = retriever.get_relevant_documents("what did he say about ketanji brown jackson") Specifying top k# You can also specify search kwargs like k to use when doing retrieval. retriever = db.as_retriever(search_kwargs={"k": 1}) docs = retriever.get_relevant_documents("what did he say about ketanji brown jackson") len(docs) 1 previous Time Weighted VectorStore next Vespa Contents Maximum Marginal Relevance Retrieval Similarity Score Threshold Retrieval Specifying top k By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/vectorstore.html
a9607cc3ce89-0
.ipynb .pdf Self-querying Contents Creating a Pinecone index Creating our self-querying retriever Testing it out Filter k Self-querying# In the notebook we’ll demo the SelfQueryRetriever, which, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents, but to also extract filters from the user query on the metadata of stored documents and to execute those filters. Creating a Pinecone index# First we’ll want to create a Pinecone VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies. To use Pinecone, you need to have the pinecone package installed and you must have an API key and an Environment. Here are the installation instructions. NOTE: The self-query retriever requires you to have the lark package installed. # !pip install lark #!pip install pinecone-client import os import pinecone pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENV"]) /Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pinecone/index.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console) from tqdm.autonotebook import tqdm from langchain.schema import Document from langchain.embeddings.openai import OpenAIEmbeddings
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
a9607cc3ce89-1
from langchain.schema import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Pinecone embeddings = OpenAIEmbeddings() # create new index pinecone.create_index("langchain-self-retriever-demo", dimension=1536) docs = [ Document(page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": ["action", "science fiction"]}), Document(page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}), Document(page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}), Document(page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}), Document(page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}), Document(page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={"year": 1979, "rating": 9.9, "director": "Andrei Tarkovsky", "genre": ["science fiction", "thriller"], "rating": 9.9}) ] vectorstore = Pinecone.from_documents( docs, embeddings, index_name="langchain-self-retriever-demo" ) Creating our self-querying retriever#
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
a9607cc3ce89-2
) Creating our self-querying retriever# Now we can instantiate our retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents. from langchain.llms import OpenAI from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.chains.query_constructor.base import AttributeInfo metadata_field_info=[ AttributeInfo( name="genre", description="The genre of the movie", type="string or list[string]", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm = OpenAI(temperature=0) retriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True) Testing it out# And now we can try actually using our retriever! # This example only specifies a relevant query retriever.get_relevant_documents("What are some movies about dinosaurs") query='dinosaur' filter=None [Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': ['action', 'science fiction'], 'rating': 7.7, 'year': 1993.0}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
a9607cc3ce89-3
Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}), Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'director': 'Christopher Nolan', 'rating': 8.2, 'year': 2010.0})] # This example only specifies a filter retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5") query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) [Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0}), Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})] # This example specifies a query and a filter retriever.get_relevant_documents("Has Greta Gerwig directed any movies about women") query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig')
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
a9607cc3ce89-4
[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'director': 'Greta Gerwig', 'rating': 8.3, 'year': 2019.0})] # This example specifies a composite filter retriever.get_relevant_documents("What's a highly rated (above 8.5) science fiction film?") query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction'), Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5)]) [Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': ['science fiction', 'thriller'], 'rating': 9.9, 'year': 1979.0})] # This example specifies a query and composite filter retriever.get_relevant_documents("What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated") query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990.0), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005.0), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')]) [Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0})] Filter k# We can also use the self query retriever to specify k: the number of documents to fetch.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
a9607cc3ce89-5
We can do this by passing enable_limit=True to the constructor. retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, enable_limit=True, verbose=True ) # This example only specifies a relevant query retriever.get_relevant_documents("What are two movies about dinosaurs") previous Self-querying with Qdrant next SVM Contents Creating a Pinecone index Creating our self-querying retriever Testing it out Filter k By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/self_query.html
21705101c034-0
.ipynb .pdf Self-querying with Chroma Contents Creating a Chroma vectorstore Creating our self-querying retriever Testing it out Filter k Self-querying with Chroma# Chroma is a database for building AI applications with embeddings. In the notebook we’ll demo the SelfQueryRetriever wrapped around a Chroma vector store. Creating a Chroma vectorstore# First we’ll want to create a Chroma VectorStore and seed it with some data. We’ve created a small demo set of documents that contain summaries of movies. NOTE: The self-query retriever requires you to have lark installed (pip install lark). We also need the chromadb package. #!pip install lark #!pip install chromadb We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. import os import getpass os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') from langchain.schema import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma embeddings = OpenAIEmbeddings() docs = [ Document(page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}), Document(page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}), Document(page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
21705101c034-1
Document(page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}), Document(page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}), Document(page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={"year": 1979, "rating": 9.9, "director": "Andrei Tarkovsky", "genre": "science fiction", "rating": 9.9}) ] vectorstore = Chroma.from_documents( docs, embeddings ) Using embedded DuckDB without persistence: data will be transient Creating our self-querying retriever# Now we can instantiate our retriever. To do this we’ll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents. from langchain.llms import OpenAI from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.chains.query_constructor.base import AttributeInfo metadata_field_info=[ AttributeInfo( name="genre", description="The genre of the movie", type="string or list[string]", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ]
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
21705101c034-2
type="float" ), ] document_content_description = "Brief summary of a movie" llm = OpenAI(temperature=0) retriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_description, metadata_field_info, verbose=True) Testing it out# And now we can try actually using our retriever! # This example only specifies a relevant query retriever.get_relevant_documents("What are some movies about dinosaurs") query='dinosaur' filter=None [Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}), Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}), Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'year': 2010, 'director': 'Christopher Nolan', 'rating': 8.2})] # This example only specifies a filter retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5") query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) [Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
21705101c034-3
Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})] # This example specifies a query and a filter retriever.get_relevant_documents("Has Greta Gerwig directed any movies about women") query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') [Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3})] # This example specifies a composite filter retriever.get_relevant_documents("What's a highly rated (above 8.5) science fiction film?") query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction'), Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5)]) [Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction'})] # This example specifies a query and composite filter retriever.get_relevant_documents("What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated")
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
21705101c034-4
query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')]) [Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})] Filter k# We can also use the self query retriever to specify k: the number of documents to fetch. We can do this by passing enable_limit=True to the constructor. retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, enable_limit=True, verbose=True ) # This example only specifies a relevant query retriever.get_relevant_documents("what are two movies about dinosaurs") query='dinosaur' filter=None [Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}), Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}),
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
21705101c034-5
Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'year': 2010, 'director': 'Christopher Nolan', 'rating': 8.2})] previous ChatGPT Plugin next Cohere Reranker Contents Creating a Chroma vectorstore Creating our self-querying retriever Testing it out Filter k By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/chroma_self_query.html
127fa9812842-0
.ipynb .pdf Arxiv Contents Installation Examples Running retriever Question Answering on facts Arxiv# arXiv is an open-access archive for 2 million scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics. This notebook shows how to retrieve scientific articles from Arxiv.org into the Document format that is used downstream. Installation# First, you need to install the arxiv python package. #!pip install arxiv ArxivRetriever has these arguments: optional load_max_docs: default=100. Use it to limit the number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments. There is a hard limit of 300 for now. optional load_all_available_meta: default=False. By default only the most important fields are downloaded: Published (date when document was published/last updated), Title, Authors, Summary. If True, other fields are also downloaded. get_relevant_documents() has one argument, query: free text which is used to find documents in Arxiv.org Examples# Running retriever# from langchain.retrievers import ArxivRetriever retriever = ArxivRetriever(load_max_docs=2) docs = retriever.get_relevant_documents(query='1605.08386') docs[0].metadata # meta-information of the Document {'Published': '2016-05-26', 'Title': 'Heat-bath random walks with Markov bases', 'Authors': 'Caprice Stanley, Tobias Windisch',
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html
127fa9812842-1
'Authors': 'Caprice Stanley, Tobias Windisch', 'Summary': 'Graphs on lattice points are studied whose edges come from a finite set of\nallowed moves of arbitrary length. We show that the diameter of these graphs on\nfibers of a fixed integer matrix can be bounded from above by a constant. We\nthen study the mixing behaviour of heat-bath random walks on these graphs. We\nalso state explicit conditions on the set of moves so that the heat-bath random\nwalk, a generalization of the Glauber dynamics, is an expander in fixed\ndimension.'} docs[0].page_content[:400] # a content of the Document 'arXiv:1605.08386v1 [math.CO] 26 May 2016\nHEAT-BATH RANDOM WALKS WITH MARKOV BASES\nCAPRICE STANLEY AND TOBIAS WINDISCH\nAbstract. Graphs on lattice points are studied whose edges come from a finite set of\nallowed moves of arbitrary length. We show that the diameter of these graphs on fibers of a\nfixed integer matrix can be bounded from above by a constant. We then study the mixing\nbehaviour of heat-b' Question Answering on facts# # get a token: https://platform.openai.com/account/api-keys from getpass import getpass OPENAI_API_KEY = getpass() import os os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.chat_models import ChatOpenAI from langchain.chains import ConversationalRetrievalChain model = ChatOpenAI(model_name='gpt-3.5-turbo') # switch to 'gpt-4' qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever) questions = [
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html
127fa9812842-2
questions = [ "What are Heat-bath random walks with Markov base?", "What is the ImageBind model?", "How does Compositional Reasoning with Large Language Models works?", ] chat_history = [] for question in questions: result = qa({"question": question, "chat_history": chat_history}) chat_history.append((question, result['answer'])) print(f"-> **Question**: {question} \n") print(f"**Answer**: {result['answer']} \n") -> **Question**: What are Heat-bath random walks with Markov base? **Answer**: I'm not sure, as I don't have enough context to provide a definitive answer. The term "Heat-bath random walks with Markov base" is not mentioned in the given text. Could you provide more information or context about where you encountered this term? -> **Question**: What is the ImageBind model? **Answer**: ImageBind is an approach developed by Facebook AI Research to learn a joint embedding across six different modalities, including images, text, audio, depth, thermal, and IMU data. The approach uses the binding property of images to align each modality's embedding to image embeddings and achieve an emergent alignment across all modalities. This enables novel multimodal capabilities, including cross-modal retrieval, embedding-space arithmetic, and audio-to-image generation, among others. The approach sets a new state-of-the-art on emergent zero-shot recognition tasks across modalities, outperforming specialist supervised models. Additionally, it shows strong few-shot recognition results and serves as a new way to evaluate vision models for visual and non-visual tasks. -> **Question**: How does Compositional Reasoning with Large Language Models works?
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html
127fa9812842-3
-> **Question**: How does Compositional Reasoning with Large Language Models works? **Answer**: Compositional reasoning with large language models refers to the ability of these models to correctly identify and represent complex concepts by breaking them down into smaller, more basic parts and combining them in a structured way. This involves understanding the syntax and semantics of language and using that understanding to build up more complex meanings from simpler ones. In the context of the paper "Does CLIP Bind Concepts? Probing Compositionality in Large Image Models", the authors focus specifically on the ability of a large pretrained vision and language model (CLIP) to encode compositional concepts and to bind variables in a structure-sensitive way. They examine CLIP's ability to compose concepts in a single-object setting, as well as in situations where concept binding is needed. The authors situate their work within the tradition of research on compositional distributional semantics models (CDSMs), which seek to bridge the gap between distributional models and formal semantics by building architectures which operate over vectors yet still obey traditional theories of linguistic composition. They compare the performance of CLIP with several architectures from research on CDSMs to evaluate its ability to encode and reason about compositional concepts. questions = [ "What are Heat-bath random walks with Markov base? Include references to answer.", ] chat_history = [] for question in questions: result = qa({"question": question, "chat_history": chat_history}) chat_history.append((question, result['answer'])) print(f"-> **Question**: {question} \n") print(f"**Answer**: {result['answer']} \n") -> **Question**: What are Heat-bath random walks with Markov base? Include references to answer.
rtdocs_stable/api.python.langchain.com/en/stable/modules/indexes/retrievers/examples/arxiv.html