\n<18> \n<19> \"\"\"\n<20> )\n<21> file.name = \"test.json\"\n<22> htmlparser = LocalHTMLParser(verbose=True)\n<23> pages = [page async for page in htmlparser.parse(file)]\n<24> assert len(pages) == 1\n<25> assert pages[0].page_num == 0\n<26> assert pages[0].offset == 0\n<27> assert (\n<28> pages[0].text\n<29> == \"Test title\\nTest header\\n Test paragraph one\\n Test paragraph two\\n Test paragraph three\\n -- Test hyphens --\"\n<30> )\n<31> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: io\n StringIO(initial_value: Optional[str]=..., newline: Optional[str]=...)\n \n at: io.StringIO\n name: Any\n \n at: scripts.prepdocslib.htmlparser\n LocalHTMLParser(verbose: bool=False)\n \n at: scripts.prepdocslib.htmlparser.LocalHTMLParser\n parse(content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.page.Page.__init__\n self.page_num = page_num\n \n self.offset = offset\n \n self.text = text\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n - if self.verbose:\n + logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n - print(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 2===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 9===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 10===========\n # module: 
scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 13===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 15===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 17===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 18===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. 
If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 19===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 20===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def add_file(self, file: File):\n + if self.image_embeddings:\n + logging.warning(\"Image embeddings are not currently supported for the user upload feature\")\n + sections = await parse_file(file, self.file_processors)\n + if sections:\n + await self.search_manager.update_content(sections)\n + "}}},{"rowIdx":5811,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: with caplog.at_level(logging.INFO):\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1>: with pytest.raises(tenacity.RetryError):\n with pytest.raises(tenacity.RetryError):\n <2>: embeddings = AzureOpenAIEmbeddingService(\n embeddings = AzureOpenAIEmbeddingService(\n <3>: open_ai_service=\"x\",\n open_ai_service=\"x\",\n <4>: open_ai_deployment=\"x\",\n open_ai_deployment=\"x\",\n <5>: open_ai_model_name=\"text-embedding-ada-002\",\n open_ai_model_name=\"text-embedding-ada-002\",\n <6>: credential=MockAzureCredential(),\n credential=MockAzureCredential(),\n <7>: disable_batch=False,\n disable_batch=False,\n <8>: verbose=True,\n <9>: )\n )\n<10>: monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11>: await embeddings.create_embeddings(texts=[\"foo\"])\n await embeddings.create_embeddings(texts=[\"foo\"])\n<12>: captured = capsys.readouterr()\n<13>: assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):\n - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys):\n <0> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1> with pytest.raises(tenacity.RetryError):\n <2> embeddings = AzureOpenAIEmbeddingService(\n <3> open_ai_service=\"x\",\n <4> open_ai_deployment=\"x\",\n <5> open_ai_model_name=\"text-embedding-ada-002\",\n <6> credential=MockAzureCredential(),\n <7> disable_batch=False,\n <8> 
verbose=True,\n <9> )\n<10> monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11> await embeddings.create_embeddings(texts=[\"foo\"])\n<12> captured = capsys.readouterr()\n<13> assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n<14> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.logging\n caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]\n \n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.monkeypatch\n monkeypatch() -> Generator[\"MonkeyPatch\", None, None]\n \n at: _pytest.python_api\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> \"RaisesContext[E]\"\n \n at: logging\n INFO = 20\n \n at: scripts.prepdocslib.embeddings\n AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False)\n \n at: scripts.prepdocslib.embeddings.OpenAIEmbeddings\n SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n \n create_embeddings(texts: List[str]) -> List[List[float]]\n \n at: tenacity\n RetryError(last_attempt: \"Future\")\n \n at: tenacity.wait\n wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)\n \n at: tests.test_prepdocs\n create_rate_limit_client(*args, **kwargs)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 1===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 2===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 8===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 9===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n 
\n===========changed ref 10===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 12===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 15===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 17===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. 
If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 18===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 19===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def add_file(self, file: File):\n + if self.image_embeddings:\n + logging.warning(\"Image embeddings are not currently supported for the user upload feature\")\n + sections = await parse_file(file, self.file_processors)\n + if sections:\n + await self.search_manager.update_content(sections)\n + "}}},{"rowIdx":5812,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_ratelimiterror_single"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: with caplog.at_level(logging.INFO):\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1>: with pytest.raises(tenacity.RetryError):\n with pytest.raises(tenacity.RetryError):\n <2>: embeddings = AzureOpenAIEmbeddingService(\n embeddings = AzureOpenAIEmbeddingService(\n <3>: open_ai_service=\"x\",\n open_ai_service=\"x\",\n <4>: open_ai_deployment=\"x\",\n open_ai_deployment=\"x\",\n <5>: open_ai_model_name=\"text-embedding-ada-002\",\n open_ai_model_name=\"text-embedding-ada-002\",\n <6>: credential=MockAzureCredential(),\n credential=MockAzureCredential(),\n <7>: disable_batch=True,\n disable_batch=True,\n <8>: verbose=True,\n <9>: )\n )\n<10>: monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11>: await embeddings.create_embeddings(texts=[\"foo\"])\n await embeddings.create_embeddings(texts=[\"foo\"])\n<12>: captured = capsys.readouterr()\n<13>: assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n + async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):\n - async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys):\n <0> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1> with pytest.raises(tenacity.RetryError):\n <2> embeddings = AzureOpenAIEmbeddingService(\n <3> open_ai_service=\"x\",\n <4> open_ai_deployment=\"x\",\n <5> open_ai_model_name=\"text-embedding-ada-002\",\n <6> credential=MockAzureCredential(),\n <7> disable_batch=True,\n <8> 
verbose=True,\n <9> )\n<10> monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11> await embeddings.create_embeddings(texts=[\"foo\"])\n<12> captured = capsys.readouterr()\n<13> assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n<14> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.python_api\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> \"RaisesContext[E]\"\n \n at: logging\n INFO = 20\n \n at: scripts.prepdocslib.embeddings\n AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False)\n \n at: scripts.prepdocslib.embeddings.OpenAIEmbeddings\n create_embeddings(texts: List[str]) -> List[List[float]]\n \n at: tenacity\n RetryError(last_attempt: \"Future\")\n \n at: tenacity.wait\n wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)\n \n at: tests.test_prepdocs\n create_rate_limit_client(*args, **kwargs)\n \n \n===========changed ref 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):\n - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys):\n + with caplog.at_level(logging.INFO):\n + monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n - monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n + with pytest.raises(tenacity.RetryError):\n - with pytest.raises(tenacity.RetryError):\n + embeddings = AzureOpenAIEmbeddingService(\n - embeddings = AzureOpenAIEmbeddingService(\n + open_ai_service=\"x\",\n - open_ai_service=\"x\",\n + open_ai_deployment=\"x\",\n - open_ai_deployment=\"x\",\n + open_ai_model_name=\"text-embedding-ada-002\",\n - open_ai_model_name=\"text-embedding-ada-002\",\n + credential=MockAzureCredential(),\n - credential=MockAzureCredential(),\n + disable_batch=False,\n - disable_batch=False,\n - verbose=True,\n + )\n - )\n + monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n - monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n + await embeddings.create_embeddings(texts=[\"foo\"])\n - await embeddings.create_embeddings(texts=[\"foo\"])\n - captured = capsys.readouterr()\n + assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n - assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 2===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.searchmanager\n + logger = 
logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 9===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 10===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 13===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 15===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 17===========\n # module: scripts.prepdocslib.embeddings\n 
class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n "}}},{"rowIdx":5813,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_autherror"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <8>: verbose=True,\n<20>: verbose=True,\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_autherror(monkeypatch, capsys):\n <0> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1> with pytest.raises(openai.AuthenticationError):\n <2> embeddings = AzureOpenAIEmbeddingService(\n <3> open_ai_service=\"x\",\n <4> open_ai_deployment=\"x\",\n <5> open_ai_model_name=\"text-embedding-ada-002\",\n <6> credential=MockAzureCredential(),\n <7> disable_batch=False,\n <8> verbose=True,\n <9> )\n<10> monkeypatch.setattr(embeddings, \"create_client\", create_auth_error_limit_client)\n<11> await embeddings.create_embeddings(texts=[\"foo\"])\n<12> \n<13> with pytest.raises(openai.AuthenticationError):\n<14> embeddings = AzureOpenAIEmbeddingService(\n<15> open_ai_service=\"x\",\n<16> open_ai_deployment=\"x\",\n<17> open_ai_model_name=\"text-embedding-ada-002\",\n<18> credential=MockAzureCredential(),\n<19> disable_batch=True,\n<20> verbose=True,\n<21> )\n<22> monkeypatch.setattr(embeddings, \"create_client\", create_auth_error_limit_client)\n<23> await embeddings.create_embeddings(texts=[\"foo\"])\n<24> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.python_api\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]\n raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) 
-> \"RaisesContext[E]\"\n \n at: scripts.prepdocslib.embeddings\n AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False)\n \n at: scripts.prepdocslib.embeddings.OpenAIEmbeddings\n create_embeddings(texts: List[str]) -> List[List[float]]\n \n at: tenacity.wait\n wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)\n \n at: tests.test_prepdocs\n create_auth_error_limit_client(*args, **kwargs)\n \n \n===========changed ref 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n + async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):\n - async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys):\n + with caplog.at_level(logging.INFO):\n + monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n - monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n + with pytest.raises(tenacity.RetryError):\n - with pytest.raises(tenacity.RetryError):\n + embeddings = AzureOpenAIEmbeddingService(\n - embeddings = AzureOpenAIEmbeddingService(\n + open_ai_service=\"x\",\n - open_ai_service=\"x\",\n + open_ai_deployment=\"x\",\n - open_ai_deployment=\"x\",\n + open_ai_model_name=\"text-embedding-ada-002\",\n - open_ai_model_name=\"text-embedding-ada-002\",\n + credential=MockAzureCredential(),\n - credential=MockAzureCredential(),\n + disable_batch=True,\n - disable_batch=True,\n - verbose=True,\n + )\n - )\n + monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n - monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n + await embeddings.create_embeddings(texts=[\"foo\"])\n - await embeddings.create_embeddings(texts=[\"foo\"])\n - captured = capsys.readouterr()\n + assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n - assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n \n===========changed ref 1===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):\n - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys):\n + with caplog.at_level(logging.INFO):\n + monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n - monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n + with pytest.raises(tenacity.RetryError):\n - with pytest.raises(tenacity.RetryError):\n + embeddings = AzureOpenAIEmbeddingService(\n - embeddings = AzureOpenAIEmbeddingService(\n + open_ai_service=\"x\",\n - open_ai_service=\"x\",\n + open_ai_deployment=\"x\",\n - open_ai_deployment=\"x\",\n + open_ai_model_name=\"text-embedding-ada-002\",\n - open_ai_model_name=\"text-embedding-ada-002\",\n + credential=MockAzureCredential(),\n - credential=MockAzureCredential(),\n + disable_batch=False,\n - disable_batch=False,\n - verbose=True,\n + )\n - )\n + monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n - monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n + await embeddings.create_embeddings(texts=[\"foo\"])\n - await embeddings.create_embeddings(texts=[\"foo\"])\n - captured = capsys.readouterr()\n + assert caplog.text.count(\"Rate limited on the OpenAI embeddings 
API\") == 14\n - assert captured.out.count(\"Rate limited on the OpenAI embeddings API\") == 14\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 9===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 10===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + "}}},{"rowIdx":5814,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):\n if not embeddings:\n <1>: raise Exception(\"Expecting AzureOpenAI embedding service\")\n raise Exception(\"Expecting AzureOpenAI embedding Service\")\n<12>: self.search_info = search_info\n"},"main_code":{"kind":"string","value":" self,\n list_file_strategy: ListFileStrategy,\n blob_manager: BlobManager,\n + search_info: SearchInfo,\n embeddings: Optional[AzureOpenAIEmbeddingService],\n subscription_id: str,\n 
search_service_user_assigned_id: str,\n document_action: DocumentAction = DocumentAction.Add,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n category: Optional[str] = None,\n ):\n <0> if not embeddings:\n <1> raise Exception(\"Expecting AzureOpenAI embedding Service\")\n <2> \n <3> self.list_file_strategy = list_file_strategy\n <4> self.blob_manager = blob_manager\n <5> self.document_action = document_action\n <6> self.embeddings = embeddings\n <7> self.subscription_id = subscription_id\n <8> self.search_user_assigned_identity = search_service_user_assigned_id\n <9> self.search_analyzer_name = search_analyzer_name\n<10> self.use_acls = use_acls\n<11> self.category = category\n<12> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: scripts.prepdocslib.blobmanager\n BlobManager(endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool=False)\n \n at: scripts.prepdocslib.embeddings\n AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False)\n \n at: scripts.prepdocslib.listfilestrategy\n ListFileStrategy()\n \n at: scripts.prepdocslib.strategy\n SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str)\n \n DocumentAction()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 1===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 2===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 8===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 9===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 10===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = 
False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 12===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 15===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 17===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. 
If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 18===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 19===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def add_file(self, file: File):\n + if self.image_embeddings:\n + logging.warning(\"Image embeddings are not currently supported for the user upload feature\")\n + sections = await parse_file(file, self.file_processors)\n + if sections:\n + await self.search_manager.update_content(sections)\n + \n===========changed ref 20===========\n # module: scripts.prepdocslib.listfilestrategy\n class ADLSGen2ListFileStrategy(ListFileStrategy):\n def __init__(\n self,\n data_lake_storage_account: str,\n data_lake_filesystem: str,\n data_lake_path: str,\n credential: Union[AsyncTokenCredential, str],\n - verbose: bool = False,\n ):\n self.data_lake_storage_account = data_lake_storage_account\n self.data_lake_filesystem = data_lake_filesystem\n self.data_lake_path = data_lake_path\n self.credential = credential\n - self.verbose = verbose\n "}}},{"rowIdx":5815,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.setup"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <1>: search_info=self.search_info,\n search_info=search_info,\n<15>: name=f\"{self.search_info.index_name}-vectorizer\",\n name=f\"{search_info.index_name}-vectorizer\",\n<26>: ds_client = self.search_info.create_search_indexer_client()\n ds_client = search_info.create_search_indexer_client()\n<29>: name=f\"{self.search_info.index_name}-blob\",\n name=f\"{search_info.index_name}-blob\",\n<37>: logger.info(\"Search indexer data source connection updated.\")\n print(\"Search indexer data source connection updated.\")\n<39>: embedding_skillset = await self.create_embedding_skill(self.search_info.index_name)\n embedding_skillset = await self.create_embedding_skill(search_info.index_\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.integratedvectorizerstrategy\n class IntegratedVectorizerStrategy(Strategy):\n + def setup(self):\n - def setup(self, search_info: SearchInfo):\n <0> search_manager = SearchManager(\n <1> search_info=search_info,\n <2> search_analyzer_name=self.search_analyzer_name,\n <3> use_acls=self.use_acls,\n <4> use_int_vectorization=True,\n <5> embeddings=self.embeddings,\n <6> search_images=False,\n <7> )\n <8> \n <9> if self.embeddings is None:\n<10> raise ValueError(\"Expecting Azure Open AI instance\")\n<11> \n<12> await search_manager.create_index(\n<13> 
vectorizers=[\n<14> AzureOpenAIVectorizer(\n<15> name=f\"{search_info.index_name}-vectorizer\",\n<16> kind=\"azureOpenAI\",\n<17> azure_open_ai_parameters=AzureOpenAIParameters(\n<18> resource_uri=f\"https://{self.embeddings.open_ai_service}.openai.azure.com\",\n<19> deployment_id=self.embeddings.open_ai_deployment,\n<20> ),\n<21> ),\n<22> ]\n<23> )\n<24> \n<25> # create indexer client\n<26> ds_client = search_info.create_search_indexer_client()\n<27> ds_container = SearchIndexerDataContainer(name=self.blob_manager.container)\n<28> data_source_connection = SearchIndexerDataSourceConnection(\n<29> name=f\"{search_info.index_name}-blob\",\n<30> type=\"azureblob\",\n<31> connection_string=self.blob_manager.get_managedidentity_connectionstring(),\n<32> container=ds_container,\n<33> data_deletion_detection_policy=NativeBlobSoftDeleteDeletionDetectionPolicy(),\n<34> )\n<35> \n<36> await ds_client.create_or_update_data_source_connection(data_source_connection)\n<37> print(\"Search indexer data source connection updated.\")\n<38> \n<39> embedding_skillset = await self.create_embedding_skill(search_info.index_"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n class IntegratedVectorizerStrategy(Strategy):\n + def setup(self):\n - def setup(self, search_info: SearchInfo):\n # offset: 1\n await ds_client.create_or_update_skillset(embedding_skillset)\n await ds_client.close()\n \n \n===========unchanged ref 0===========\n at: scripts.prepdocslib.blobmanager.BlobManager\n get_managedidentity_connectionstring()\n \n at: scripts.prepdocslib.blobmanager.BlobManager.__init__\n self.container = container\n \n at: scripts.prepdocslib.embeddings.AzureOpenAIEmbeddingService.__init__\n self.open_ai_service = open_ai_service\n \n self.open_ai_deployment = open_ai_deployment\n \n at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.__init__\n self.blob_manager = blob_manager\n \n self.embeddings = embeddings\n \n self.search_analyzer_name = search_analyzer_name\n \n self.use_acls = use_acls\n \n self.search_info = search_info\n \n at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.create_embedding_skill\n index_projections = SearchIndexerIndexProjections(\n selectors=[\n SearchIndexerIndexProjectionSelector(\n target_index_name=index_name,\n parent_key_field_name=\"parent_id\",\n source_context=\"/document/pages/*\",\n mappings=[\n InputFieldMappingEntry(name=\"content\", source=\"/document/pages/*\"),\n InputFieldMappingEntry(name=\"embedding\", source=\"/document/pages/*/vector\"),\n InputFieldMappingEntry(name=\"sourcepage\", source=\"/document/metadata_storage_name\"),\n ],\n ),\n ],\n parameters=SearchIndexerIndexProjectionsParameters(\n projection_mode=IndexProjectionMode.SKIP_INDEXING_PARENT_DOCUMENTS\n ),\n )\n \n skillset = SearchIndexerSkillset(\n name=skillset_name,\n description=\"Skillset to chunk documents and generate embeddings\",\n skills=[split_skill, embedding_skill],\n index_projections=index_projections,\n )\n \n \n===========unchanged ref 1===========\n at: scripts.prepdocslib.searchmanager\n SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False)\n \n at: scripts.prepdocslib.searchmanager.SearchManager\n create_index(vectorizers: Optional[List[VectorSearchVectorizer]]=None)\n \n at: 
scripts.prepdocslib.strategy.SearchInfo\n create_search_indexer_client() -> SearchIndexerClient\n \n at: scripts.prepdocslib.strategy.SearchInfo.__init__\n self.index_name = index_name\n \n at: scripts.prepdocslib.strategy.Strategy\n setup(self)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n - if self.search_info.verbose:\n + logger.info(f\"Ensuring search index {self.search_info.index_name} exists\")\n - print(f\"Ensuring search index {self.search_info.index_name} exists\")\n \n async with self.search_info.create_search_index_client() as search_index_client:\n fields = [\n (\n SimpleField(name=\"id\", type=\"Edm.String\", key=True)\n if not self.use_int_vectorization\n else SearchField(\n name=\"id\",\n type=\"Edm.String\",\n key=True,\n sortable=True,\n filterable=True,\n facetable=True,\n analyzer_name=\"keyword\",\n )\n ),\n SearchableField(\n name=\"content\",\n type=\"Edm.String\",\n analyzer_name=self.search_analyzer_name,\n ),\n SearchField(\n name=\"embedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1536,\n vector_search_profile_name=\"embedding_config\",\n ),\n SimpleField(name=\"category\", type=\"Edm.String\", filterable=True, facetable=True),\n SimpleField(\n name=\"sourcepage\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n SimpleField(\n name=\"sourcefile\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n ]\n if self.use_acls:\n fields.append(\n SimpleField(\n name=\"oids\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filter\n===========changed ref 1===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 1\n \n SimpleField(\n name=\"oids\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n fields.append(\n SimpleField(\n name=\"groups\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n if self.use_int_vectorization:\n fields.append(SearchableField(name=\"parent_id\", type=\"Edm.String\", filterable=True))\n if self.search_images:\n fields.append(\n SearchField(\n name=\"imageEmbedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1024,\n vector_search_profile_name=\"embedding_config\",\n ),\n )\n \n index = SearchIndex(\n name=self.search_info.index_name,\n fields=fields,\n semantic_search=SemanticSearch(\n configurations=[\n SemanticConfiguration(\n name=\"default\",\n prioritized_fields=SemanticPrioritizedFields(\n title_field=None, content_fields=[SemanticField(field_name=\"content\")]\n ),\n )\n ]\n ),\n vector_search=VectorSearch(\n algorithms=[\n HnswAlgorithmConfiguration(\n name=\"hnsw_config\",\n parameters=HnswParameters(metric=\"cosine\"),\n )\n ],\n profiles=[\n VectorSearchProfile(\n name=\"embedding_config\",\n algorithm_configuration_name=\"hnsw_config\",\n vectorizer=(\n f\"{self\n===========changed ref 2===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: 
Optional[List[VectorSearchVectorizer]] = None):\n # offset: 2\n _info.index_name}-vectorizer\" if self.use_int_vectorization else None\n ),\n ),\n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:\n - if self.search_info.verbose:\n + logger.info(f\"Creating {self.search_info.index_name} search index\")\n - print(f\"Creating {self.search_info.index_name} search index\")\n await search_index_client.create_index(index)\n else:\n - if self.search_info.verbose:\n + logger.info(f\"Search index {self.search_info.index_name} already exists\")\n - print(f\"Search index {self.search_info.index_name} already exists\")\n "}}},{"rowIdx":5816,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.run"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":"<16>: indexer_name = f\"{self.search_info.index_name}-indexer\"\n indexer_name = f\"{search_info.index_name}-indexer\"\n<21>: skillset_name=f\"{self.search_info.index_name}-skillset\",\n skillset_name=f\"{search_info.index_name}-skillset\",\n<22>: target_index_name=self.search_info.index_name,\n target_index_name=search_info.index_name,\n<23>: data_source_name=f\"{self.search_info.index_name}-blob\",\n data_source_name=f\"{search_info.index_name}-blob\",\n<28>: indexer_client = self.search_info.create_search_indexer_client()\n indexer_client = search_info.create_search_indexer_client()\n<35>: logger.info(\n print(\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.integratedvectorizerstrategy\n class IntegratedVectorizerStrategy(Strategy):\n + def run(self):\n - def run(self, search_info: SearchInfo):\n <0> if self.document_action == DocumentAction.Add:\n <1> files = self.list_file_strategy.list()\n <2> async for file in files:\n <3> try:\n <4> await self.blob_manager.upload_blob(file)\n <5> finally:\n <6> if file:\n <7> file.close()\n <8> elif self.document_action == DocumentAction.Remove:\n <9> paths = self.list_file_strategy.list_paths()\n<10> async for path in paths:\n<11> await self.blob_manager.remove_blob(path)\n<12> elif self.document_action == DocumentAction.RemoveAll:\n<13> await self.blob_manager.remove_blob()\n<14> \n<15> # Create an indexer\n<16> indexer_name = f\"{search_info.index_name}-indexer\"\n<17> \n<18> indexer = SearchIndexer(\n<19> name=indexer_name,\n<20> description=\"Indexer to index documents and generate embeddings\",\n<21> skillset_name=f\"{search_info.index_name}-skillset\",\n<22> target_index_name=search_info.index_name,\n<23> data_source_name=f\"{search_info.index_name}-blob\",\n<24> # Map the metadata_storage_name field to the title field in the index to display the PDF title in the search results\n<25> field_mappings=[FieldMapping(source_field_name=\"metadata_storage_name\", target_field_name=\"title\")],\n<26> )\n<27> \n<28> indexer_client = search_info.create_search_indexer_client()\n<29> indexer_result = await indexer_client.create_or_update_indexer(indexer)\n<30> \n<31> # Run the indexer\n<32> await indexer_client.run_indexer(indexer_name)\n<33> await indexer_client.close()\n<34> \n<35> print(\n<36> f\"Successfully created 
index, indexer: {indexer_result.name}, and skillset. Please navigate to search service in Azure Portal to"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n class IntegratedVectorizerStrategy(Strategy):\n + def run(self):\n - def run(self, search_info: SearchInfo):\n # offset: 1\n )\n \n \n===========unchanged ref 0===========\n at: scripts.prepdocslib.blobmanager.BlobManager\n upload_blob(file: File) -> Optional[List[str]]\n \n remove_blob(path: Optional[str]=None)\n \n at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy\n create_embedding_skill(index_name: str)\n \n at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.__init__\n self.list_file_strategy = list_file_strategy\n \n self.blob_manager = blob_manager\n \n self.document_action = document_action\n \n self.search_info = search_info\n \n at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.setup\n ds_client = self.search_info.create_search_indexer_client()\n \n at: scripts.prepdocslib.listfilestrategy.ListFileStrategy\n list() -> AsyncGenerator[File, None]\n \n list_paths() -> AsyncGenerator[str, None]\n \n at: scripts.prepdocslib.strategy\n DocumentAction()\n \n at: scripts.prepdocslib.strategy.SearchInfo\n create_search_indexer_client() -> SearchIndexerClient\n \n at: scripts.prepdocslib.strategy.SearchInfo.__init__\n self.index_name = index_name\n \n at: scripts.prepdocslib.strategy.Strategy\n run(self)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def upload_blob(self, file: File) -> Optional[List[str]]:\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n await container_client.create_container()\n \n # Re-open and upload the original file\n with open(file.content.name, \"rb\") as reopened_file:\n blob_name = BlobManager.blob_name_from_file_name(file.content.name)\n + logger.info(f\"\\tUploading blob for whole file -> {blob_name}\")\n - print(f\"\\tUploading blob for whole file -> {blob_name}\")\n await container_client.upload_blob(blob_name, reopened_file, overwrite=True)\n \n if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == \".pdf\":\n return await self.upload_pdf_blob_images(service_client, container_client, file)\n \n return None\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path 
is not None and blob_path == os.path.basename(path)):\n continue\n - if self.verbose:\n + logger.info(f\"\\tRemoving blob {blob_path}\")\n - print(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n \n===========changed ref 2===========\n self,\n list_file_strategy: ListFileStrategy,\n blob_manager: BlobManager,\n + search_info: SearchInfo,\n embeddings: Optional[AzureOpenAIEmbeddingService],\n subscription_id: str,\n search_service_user_assigned_id: str,\n document_action: DocumentAction = DocumentAction.Add,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n category: Optional[str] = None,\n ):\n + if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):\n - if not embeddings:\n + raise Exception(\"Expecting AzureOpenAI embedding service\")\n - raise Exception(\"Expecting AzureOpenAI embedding Service\")\n \n self.list_file_strategy = list_file_strategy\n self.blob_manager = blob_manager\n self.document_action = document_action\n self.embeddings = embeddings\n self.subscription_id = subscription_id\n self.search_user_assigned_identity = search_service_user_assigned_id\n self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.category = category\n + self.search_info = search_info\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n class IntegratedVectorizerStrategy(Strategy):\n + def setup(self):\n - def setup(self, search_info: SearchInfo):\n search_manager = SearchManager(\n + search_info=self.search_info,\n - search_info=search_info,\n search_analyzer_name=self.search_analyzer_name,\n use_acls=self.use_acls,\n use_int_vectorization=True,\n embeddings=self.embeddings,\n search_images=False,\n )\n \n if self.embeddings is None:\n raise ValueError(\"Expecting Azure Open AI instance\")\n \n await search_manager.create_index(\n vectorizers=[\n AzureOpenAIVectorizer(\n + name=f\"{self.search_info.index_name}-vectorizer\",\n - name=f\"{search_info.index_name}-vectorizer\",\n kind=\"azureOpenAI\",\n azure_open_ai_parameters=AzureOpenAIParameters(\n resource_uri=f\"https://{self.embeddings.open_ai_service}.openai.azure.com\",\n deployment_id=self.embeddings.open_ai_deployment,\n ),\n ),\n ]\n )\n \n # create indexer client\n + ds_client = self.search_info.create_search_indexer_client()\n - ds_client = search_info.create_search_indexer_client()\n ds_container = SearchIndexerDataContainer(name=self.blob_manager.container)\n data_source_connection = SearchIndexerDataSourceConnection(\n + name=f\"{self.search_info.index_name}-blob\",\n - name=f\"{search_info.index_name}-blob\",\n type=\"azureblob\",\n connection_string=self.blob_manager.get_managedidentity_connectionstring(),\n container=ds_container,\n data_deletion_detection_policy=NativeBlobSoftDeleteDeletionDetectionPolicy(),\n )\n \n await ds_client.create_or_update_data_source_connection(data_source_connection)"}}},{"rowIdx":5817,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.pdfparser/LocalPdfParser.parse"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: if self.verbose:\n <1>: logger.info(f\"\\tExtracting text from '{content.name}' using local PDF 
parser (pypdf)\")\n print(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n <0> if self.verbose:\n <1> print(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n <2> \n <3> reader = PdfReader(content)\n <4> pages = reader.pages\n <5> offset = 0\n <6> for page_num, p in enumerate(pages):\n <7> page_text = p.extract_text()\n <8> yield Page(page_num=page_num, offset=offset, text=page_text)\n <9> offset += len(page_text)\n<10> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.page\n Page(page_num: int, offset: int, text: str)\n \n at: scripts.prepdocslib.parser.Parser\n parse(self, content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.pdfparser\n logger = logging.getLogger(\"ingester\")\n \n at: typing\n AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\n \n IO()\n \n at: typing.IO\n __slots__ = ()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 1===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 2===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 9===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 10===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = 
token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 13===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 15===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 17===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 18===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. 
If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 19===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 20===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def add_file(self, file: File):\n + if self.image_embeddings:\n + logging.warning(\"Image embeddings are not currently supported for the user upload feature\")\n + sections = await parse_file(file, self.file_processors)\n + if sections:\n + await self.search_manager.update_content(sections)\n + \n===========changed ref 21===========\n # module: scripts.prepdocslib.listfilestrategy\n class ADLSGen2ListFileStrategy(ListFileStrategy):\n def __init__(\n self,\n data_lake_storage_account: str,\n data_lake_filesystem: str,\n data_lake_path: str,\n credential: Union[AsyncTokenCredential, str],\n - verbose: bool = False,\n ):\n self.data_lake_storage_account = data_lake_storage_account\n self.data_lake_filesystem = data_lake_filesystem\n self.data_lake_path = data_lake_path\n self.credential = credential\n - self.verbose = verbose\n "}}},{"rowIdx":5818,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.pdfparser/DocumentAnalysisParser.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <3>: self.verbose = verbose\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - model_id=\"prebuilt-layout\",\n - verbose: bool = False,\n + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id=\"prebuilt-layout\"\n ):\n <0> self.model_id = model_id\n <1> self.endpoint = endpoint\n <2> self.credential = credential\n <3> self.verbose = verbose\n <4> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.page\n Page(page_num: int, offset: int, text: str)\n \n at: scripts.prepdocslib.parser.Parser\n __init__(self, verbose: bool=False)\n \n parse(self, content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.pdfparser\n logger = logging.getLogger(\"ingester\")\n \n at: typing\n AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\n \n IO()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: 
bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n - if self.verbose:\n + logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n - print(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 9===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 10===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 14===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 
15===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 17===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 18===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 19===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 20===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 21===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def add_file(self, file: File):\n + if self.image_embeddings:\n + logging.warning(\"Image embeddings are not currently supported for the user upload feature\")\n + sections = await parse_file(file, self.file_processors)\n + if sections:\n + await self.search_manager.update_content(sections)\n + "}}},{"rowIdx":5819,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.pdfparser/DocumentAnalysisParser.parse"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: if self.verbose:\n <1>: logger.info(f\"Extracting text from '{content.name}' using Azure Document Intelligence\")\n print(f\"Extracting text from '{content.name}' using Azure Document 
Intelligence\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n <0> if self.verbose:\n <1> print(f\"Extracting text from '{content.name}' using Azure Document Intelligence\")\n <2> \n <3> async with DocumentIntelligenceClient(\n <4> endpoint=self.endpoint, credential=self.credential\n <5> ) as document_intelligence_client:\n <6> poller = await document_intelligence_client.begin_analyze_document(\n <7> model_id=self.model_id, analyze_request=content, content_type=\"application/octet-stream\"\n <8> )\n <9> form_recognizer_results = await poller.result()\n<10> \n<11> offset = 0\n<12> for page_num, page in enumerate(form_recognizer_results.pages):\n<13> tables_on_page = [\n<14> table\n<15> for table in (form_recognizer_results.tables or [])\n<16> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1\n<17> ]\n<18> \n<19> # mark all positions of the table spans in the page\n<20> page_offset = page.spans[0].offset\n<21> page_length = page.spans[0].length\n<22> table_chars = [-1] * page_length\n<23> for table_id, table in enumerate(tables_on_page):\n<24> for span in table.spans:\n<25> # replace all table spans with \"table_id\" in table_chars array\n<26> for i in range(span.length):\n<27> idx = span.offset - page_offset + i\n<28> if idx >= 0 and idx < page_length:\n<29> table_chars[idx] = table_id\n<30> \n<31> # build page text by replacing characters in table spans with table html\n<32> page_text = \"\"\n<33> added_tables = set()\n<34> for idx, table_id in enumerate(table_chars):\n<35> if table_id == -1:\n<36> page_text += form_recognizer_"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n # offset: 1\n elif table_id not in added_tables:\n page_text += DocumentAnalysisParser.table_to_html(tables_on_page[table_id])\n added_tables.add(table_id)\n \n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n \n===========unchanged ref 0===========\n at: scripts.prepdocslib.page\n Page(page_num: int, offset: int, text: str)\n \n at: scripts.prepdocslib.pdfparser\n DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id=\"prebuilt-layout\")\n \n at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser.__init__\n self.model_id = model_id\n \n self.endpoint = endpoint\n \n self.credential = credential\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - model_id=\"prebuilt-layout\",\n - verbose: bool = False,\n + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id=\"prebuilt-layout\"\n ):\n self.model_id = model_id\n self.endpoint = endpoint\n self.credential = credential\n - self.verbose = verbose\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n - if self.verbose:\n + logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n - print(f\"\\tExtracting text from '{content.name}' using local PDF parser 
(pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 3===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 9===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 10===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 11===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 14===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 15===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, 
disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 17===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 18===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 19===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 20===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.\"\"\"\n + if key is not None and key.strip() != \"\":\n + return key.strip()\n + return None\n + \n===========changed ref 21===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n - self,\n - open_ai_model_name: str,\n - credential: str,\n - organization: Optional[str] = None,\n - disable_batch: bool = False,\n - verbose: bool = False,\n + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch, verbose)\n self.credential = credential\n self.organization = organization\n "}}},{"rowIdx":5820,"cells":{"path":{"kind":"string","value":"tests.test_searchmanager/search_info"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <4>: verbose=True,\n"},"main_code":{"kind":"string","value":" # module: tests.test_searchmanager\n @pytest.fixture\n def search_info():\n <0> return SearchInfo(\n <1> endpoint=\"https://testsearchclient.blob.core.windows.net\",\n <2> credential=AzureKeyCredential(\"test\"),\n <3> index_name=\"test\",\n <4> verbose=True,\n <5> )\n <6> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.fixtures\n fixture(fixture_function: FixtureFunction, *, scope: \"Union[_ScopeName, Callable[[str, Config], _ScopeName]]\"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[\n Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]\n ]=..., name: Optional[str]=...) 
-> FixtureFunction\n fixture(fixture_function: None=..., *, scope: \"Union[_ScopeName, Callable[[str, Config], _ScopeName]]\"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[\n Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]\n ]=..., name: Optional[str]=None) -> FixtureFunctionMarker\n \n at: scripts.prepdocslib.strategy\n SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 2===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 4===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 9===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 10===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 11===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 12===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n \n===========changed ref 13===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + \"\"\"\n + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account\n + \"\"\"\n + \n===========changed ref 14===========\n # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - model_id=\"prebuilt-layout\",\n - verbose: bool = False,\n + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], 
model_id=\"prebuilt-layout\"\n ):\n self.model_id = model_id\n self.endpoint = endpoint\n self.credential = credential\n - self.verbose = verbose\n \n===========changed ref 15===========\n # module: scripts.prepdocslib.strategy\n class SearchInfo:\n - def __init__(\n - self,\n - endpoint: str,\n - credential: Union[AsyncTokenCredential, AzureKeyCredential],\n - index_name: str,\n - verbose: bool = False,\n - ):\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n self.endpoint = endpoint\n self.credential = credential\n self.index_name = index_name\n - self.verbose = verbose\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):\n self.open_ai_model_name = open_ai_model_name\n self.disable_batch = disable_batch\n - self.verbose = verbose\n \n===========changed ref 17===========\n # module: scripts.prepdocslib.filestrategy\n + class UploadUserFileStrategy:\n + def remove_file(self, filename: str, oid: str):\n + if filename is None or filename == \"\":\n + logging.warning(\"Filename is required to remove a file\")\n + return\n + await self.search_manager.remove_content(filename, oid)\n + \n===========changed ref 18===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the Vision embeddings API, sleeping before retrying...\")\n \n===========changed ref 19===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def before_retry_sleep(self, retry_state):\n - if self.verbose:\n + logger.info(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n - print(\"Rate limited on the OpenAI embeddings API, sleeping before retrying...\")\n \n===========changed ref 20===========\n # module: scripts.prepdocs\n + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:\n + \"\"\"Remove leading and trailing whitespace from a key if it exists. 
If the key is empty, return None."""
 +     if key is not None and key.strip() != "":
 +         return key.strip()
 +     return None
 + 
===========changed ref 21===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddingService(OpenAIEmbeddings):
     def __init__(
 -        self,
 -        open_ai_model_name: str,
 -        credential: str,
 -        organization: Optional[str] = None,
 -        disable_batch: bool = False,
 -        verbose: bool = False,
 +        self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False
     ):
 +        super().__init__(open_ai_model_name, disable_batch)
 -        super().__init__(open_ai_model_name, disable_batch, verbose)
         self.credential = credential
         self.organization = organization
 
===========changed ref 22===========
 # module: scripts.prepdocslib.filestrategy
 + class UploadUserFileStrategy:
 +     def add_file(self, file: File):
 +         if self.image_embeddings:
 +             logging.warning("Image embeddings are not currently supported for the user upload feature")
 +         sections = await parse_file(file, self.file_processors)
 +         if sections:
 +             await self.search_manager.update_content(sections)
 + 

===========record 5821: tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_empty_pages (Modified)===========
 project: Azure-Samples~azure-search-openai-demo
 commit: 87f2b9d9fd554d29f16aead4269be883e8927bb5 ("Refactoring of prepdocs for easier integration with user upload feature (#1407)")
 ground_truth:
 <0>: t = SentenceTextSplitter(has_image_embeddings=False)
      t = SentenceTextSplitter(False, True)
 main_code:
 # module: tests.test_prepdocslib_textsplitter
 def test_sentencetextsplitter_split_empty_pages():
 <0>     t = SentenceTextSplitter(False, True)
 <1> 
 <2>     assert list(t.split_pages([])) == []
 <3> 
 context:
===========unchanged ref 0===========
 at: scripts.prepdocslib.textsplitter
     SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500)
 
 at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
     split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]
 
 
===========changed ref 0===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
         # Chunking is disabled when using GPT4V. 
To be updated in the future.\n if self.has_image_embeddings:\n for i, page in enumerate(pages):\n yield SplitPage(page_num=i, text=page.text)\n \n def find_page(offset):\n num_pages = len(pages)\n for i in range(num_pages - 1):\n if offset >= pages[i].offset and offset < pages[i + 1].offset:\n return pages[i].page_num\n return pages[num_pages - 1].page_num\n \n all_text = \"\".join(page.text for page in pages)\n if len(all_text.strip()) == 0:\n return\n \n length = len(all_text)\n if length <= self.max_section_length:\n yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text)\n return\n \n start = 0\n end = length\n while start + self.section_overlap < length:\n last_word = -1\n end = start + self.max_section_length\n \n if end > length:\n end = length\n else:\n # Try to find the end of the sentence\n while (\n end < length\n and (end - start - self.max_section_length) < self.sentence_search_limit\n and all_text[end] not in self.sentence_endings\n ):\n if all_text[end] in self.word_breaks:\n last_word = end\n end += 1\n if end < length and all_text[end] not in self.sentence_endings and last_word > 0:\n end = last_word # Fall back to at least keeping a whole word\n if end < length:\n end += 1\n \n # Try to find the start of the sentence or at least a whole\n===========changed ref 1===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:\n # offset: 1\n word\n if end < length:\n end += 1\n \n # Try to find the start of the sentence or at least a whole word boundary\n last_word = -1\n while (\n start > 0\n and start > end - self.max_section_length - 2 * self.sentence_search_limit\n and all_text[start] not in self.sentence_endings\n ):\n if all_text[start] in self.word_breaks:\n last_word = start\n start -= 1\n if all_text[start] not in self.sentence_endings and last_word > 0:\n start = last_word\n if start > 0:\n start += 1\n \n section_text = all_text[start:end]\n yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text)\n \n last_table_start = section_text.rfind(\" 2 * self.sentence_search_limit and last_table_start > section_text.rfind(\"\n===========changed ref 2===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:\n # offset: 2\n } table start {last_table_start}\"\n + )\n - )\n start = min(end - self.section_overlap, start + last_table_start)\n else:\n start = end - self.section_overlap\n \n if start + self.section_overlap < end:\n yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 4===========\n # module: scripts.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 5===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # 
module: scripts.prepdocslib.searchmanager
 + logger = logging.getLogger("ingester")
 
 
===========changed ref 9===========
 # module: scripts.prepdocslib.embeddings
 + logger = logging.getLogger("ingester")
 
 
===========changed ref 10===========
 # module: scripts.prepdocslib.blobmanager
 + logger = logging.getLogger("ingester")
 
 
===========changed ref 11===========
 # module: scripts.prepdocslib.listfilestrategy
 + logger = logging.getLogger("ingester")
 
 
===========changed ref 12===========
 # module: scripts.prepdocs
 - def is_key_empty(key):
 -     return key is None or len(key.strip()) == 0
 - 
===========changed ref 13===========
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
 +   def __init__(self, path_pattern: str):
 -   def __init__(self, path_pattern: str, verbose: bool = False):
         self.path_pattern = path_pattern
 -       self.verbose = verbose
 
===========changed ref 14===========
 # module: scripts.prepdocslib.textsplitter
 class SimpleTextSplitter(TextSplitter):
 +   def __init__(self, max_object_length: int = 1000):
 -   def __init__(self, max_object_length: int = 1000, verbose: bool = False):
         self.max_object_length = max_object_length
 -       self.verbose = verbose
 
===========changed ref 15===========
 # module: scripts.prepdocslib.embeddings
 class ImageEmbeddings:
 +   def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):
 -   def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):
         self.token_provider = token_provider
         self.endpoint = endpoint
 -       self.verbose = verbose
 
===========changed ref 16===========
 # module: scripts.prepdocslib.filestrategy
 + class UploadUserFileStrategy:
 +     """
 +     Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account
 +     """
 + 

===========record 5822: tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_small_pages (Modified)===========
 project: Azure-Samples~azure-search-openai-demo
 commit: 87f2b9d9fd554d29f16aead4269be883e8927bb5 ("Refactoring of prepdocs for easier integration with user upload feature (#1407)")
 ground_truth:
 <0>: t = SentenceTextSplitter(has_image_embeddings=False)
      t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)
 main_code:
 # module: tests.test_prepdocslib_textsplitter
 def test_sentencetextsplitter_split_small_pages():
 <0>     t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)
 <1> 
 <2>     split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")]))
 <3>     assert len(split_pages) == 1
 <4>     assert split_pages[0].page_num == 0
 <5>     assert split_pages[0].text == "Not a large page"
 <6> 
 context:
===========unchanged ref 0===========
 at: scripts.prepdocslib.page
     Page(page_num: int, offset: int, text: str)
 
 at: scripts.prepdocslib.page.SplitPage.__init__
     self.page_num = page_num
 
     self.text = text
 
 at: scripts.prepdocslib.textsplitter
     SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500)
 
 at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
     split_pages(pages: List[Page]) -> Generator[SplitPage, None, 
None]\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:\n # Chunking is disabled when using GPT4V. To be updated in the future.\n if self.has_image_embeddings:\n for i, page in enumerate(pages):\n yield SplitPage(page_num=i, text=page.text)\n \n def find_page(offset):\n num_pages = len(pages)\n for i in range(num_pages - 1):\n if offset >= pages[i].offset and offset < pages[i + 1].offset:\n return pages[i].page_num\n return pages[num_pages - 1].page_num\n \n all_text = \"\".join(page.text for page in pages)\n if len(all_text.strip()) == 0:\n return\n \n length = len(all_text)\n if length <= self.max_section_length:\n yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text)\n return\n \n start = 0\n end = length\n while start + self.section_overlap < length:\n last_word = -1\n end = start + self.max_section_length\n \n if end > length:\n end = length\n else:\n # Try to find the end of the sentence\n while (\n end < length\n and (end - start - self.max_section_length) < self.sentence_search_limit\n and all_text[end] not in self.sentence_endings\n ):\n if all_text[end] in self.word_breaks:\n last_word = end\n end += 1\n if end < length and all_text[end] not in self.sentence_endings and last_word > 0:\n end = last_word # Fall back to at least keeping a whole word\n if end < length:\n end += 1\n \n # Try to find the start of the sentence or at least a whole\n===========changed ref 1===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:\n # offset: 1\n word\n if end < length:\n end += 1\n \n # Try to find the start of the sentence or at least a whole word boundary\n last_word = -1\n while (\n start > 0\n and start > end - self.max_section_length - 2 * self.sentence_search_limit\n and all_text[start] not in self.sentence_endings\n ):\n if all_text[start] in self.word_breaks:\n last_word = start\n start -= 1\n if all_text[start] not in self.sentence_endings and last_word > 0:\n start = last_word\n if start > 0:\n start += 1\n \n section_text = all_text[start:end]\n yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text)\n \n last_table_start = section_text.rfind(\" 2 * self.sentence_search_limit and last_table_start > section_text.rfind(\"\n===========changed ref 2===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:\n # offset: 2\n } table start {last_table_start}\"\n + )\n - )\n start = min(end - self.section_overlap, start + last_table_start)\n else:\n start = end - self.section_overlap\n \n if start + self.section_overlap < end:\n yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])\n \n===========changed ref 3===========\n # module: tests.test_prepdocslib_textsplitter\n def test_sentencetextsplitter_split_empty_pages():\n + t = SentenceTextSplitter(has_image_embeddings=False)\n - t = SentenceTextSplitter(False, True)\n \n assert list(t.split_pages([])) == []\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.parser\n class Parser(ABC):\n - def __init__(\n - self,\n - verbose: bool = False,\n - ):\n - self.verbose = verbose\n - \n===========changed ref 5===========\n # 
module: scripts.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 6===========\n # module: scripts.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 7===========\n # module: scripts.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 8===========\n # module: scripts.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 9===========\n # module: scripts.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 10===========\n # module: scripts.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 11===========\n # module: scripts.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 12===========\n # module: scripts.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 13===========\n # module: scripts.prepdocs\n - def is_key_empty(key):\n - return key is None or len(key.strip()) == 0\n - \n===========changed ref 14===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n - def __init__(self, path_pattern: str, verbose: bool = False):\n self.path_pattern = path_pattern\n - self.verbose = verbose\n \n===========changed ref 15===========\n # module: scripts.prepdocslib.textsplitter\n class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n - def __init__(self, max_object_length: int = 1000, verbose: bool = False):\n self.max_object_length = max_object_length\n - self.verbose = verbose\n \n===========changed ref 16===========\n # module: scripts.prepdocslib.embeddings\n class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False):\n self.token_provider = token_provider\n self.endpoint = endpoint\n - self.verbose = verbose\n "}}},{"rowIdx":5823,"cells":{"path":{"kind":"string","value":"tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_list_parse_and_split"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"87f2b9d9fd554d29f16aead4269be883e8927bb5"},"commit_message":{"kind":"string","value":"Refactoring of prepdocs for easier integration with user upload feature (#1407)"},"ground_truth":{"kind":"string","value":" <0>: text_splitter = SentenceTextSplitter(has_image_embeddings=False)\n text_splitter = SentenceTextSplitter(False, True)\n <1>: pdf_parser = LocalPdfParser()\n pdf_parser = LocalPdfParser(verbose=True)\n <5>: list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / \"*\"))\n list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / \"*\"), verbose=True)\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocslib_textsplitter\n @pytest.mark.asyncio\n async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot):\n <0> text_splitter = SentenceTextSplitter(False, True)\n <1> pdf_parser = LocalPdfParser(verbose=True)\n <2> for pdf in Path(\"data\").glob(\"*.pdf\"):\n <3> shutil.copy(str(pdf.absolute()), tmp_path)\n <4> \n <5> list_file_strategy = 
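The refactor these records capture replaces per-object verbose flags with a single module-level logger named "ingester". A minimal sketch of how a caller could restore verbose output under the new scheme; the logger name comes from the hunks above, but the exact wiring in prepdocs.py is not shown in these records, so treat this as an illustration:

import logging

# Same logger name the refactored modules acquire via logging.getLogger("ingester").
ingester_logger = logging.getLogger("ingester")

def set_verbosity(verbose: bool) -> None:
    # One global switch replaces the old verbose=True constructor arguments.
    logging.basicConfig(format="%(message)s")
    ingester_logger.setLevel(logging.INFO if verbose else logging.WARNING)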
========== row 5823 ==========
path: tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_list_parse_and_split
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<0>: + text_splitter = SentenceTextSplitter(has_image_embeddings=False)
     - text_splitter = SentenceTextSplitter(False, True)
<1>: + pdf_parser = LocalPdfParser()
     - pdf_parser = LocalPdfParser(verbose=True)
<5>: + list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"))
     - list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)

main_code:
# module: tests.test_prepdocslib_textsplitter
@pytest.mark.asyncio
async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot):
<0>     text_splitter = SentenceTextSplitter(False, True)
<1>     pdf_parser = LocalPdfParser(verbose=True)
<2>     for pdf in Path("data").glob("*.pdf"):
<3>         shutil.copy(str(pdf.absolute()), tmp_path)
<4>
<5>     list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)
<6>     files = list_file_strategy.list()
<7>     processed = 0
<8>     results = {}
<9>     async for file in files:
<10>        pages = [page async for page in pdf_parser.parse(content=file.content)]
<11>        assert pages
<12>        sections = [
<13>            Section(split_page, content=file, category="test category")
<14>            for split_page in text_splitter.split_pages(pages)
<15>        ]
<16>        assert sections
<17>        results[file.filename()] = [section.split_page.text for section in sections]
<18>        processed += 1
<19>    assert processed > 1
<20>    # Sort results by key
<21>    results = {k: results[k] for k in sorted(results)}
<22>    snapshot.assert_match(json.dumps(results, indent=2), "text_splitter_sections.txt")

context:
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.tmpdir
    tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Generator[Path, None, None]
at: json
    dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str
at: pathlib
    Path()
at: pathlib.Path
    __slots__ = ()
    glob(pattern: str) -> Generator[_P, None, None]
    absolute() -> _P
at: scripts.prepdocslib.listfilestrategy
    LocalListFileStrategy(path_pattern: str)
at: scripts.prepdocslib.listfilestrategy.File
    filename()
at: scripts.prepdocslib.listfilestrategy.File.__init__
    self.content = content
at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy
    list() -> AsyncGenerator[File, None]
at: scripts.prepdocslib.page.SplitPage.__init__
    self.text = text
at: scripts.prepdocslib.pdfparser
    LocalPdfParser(verbose: bool=False)
at: scripts.prepdocslib.pdfparser.LocalPdfParser
    parse(content: IO) -> AsyncGenerator[Page, None]
at: scripts.prepdocslib.searchmanager
    Section(split_page: SplitPage, content: File, category: Optional[str]=None)
at: scripts.prepdocslib.searchmanager.Section.__init__
    self.split_page = split_page
at: scripts.prepdocslib.textsplitter
    SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500)
at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]
at: shutil
    copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) -> _PathReturn

===========changed ref 0===========
# module: scripts.prepdocslib.pdfparser
  class LocalPdfParser(Parser):
      def parse(self, content: IO) -> AsyncGenerator[Page, None]:
-         if self.verbose:
+         logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)")
-             print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)")

          reader = PdfReader(content)
          pages = reader.pages
          offset = 0
          for page_num, p in enumerate(pages):
              page_text = p.extract_text()
              yield Page(page_num=page_num, offset=offset, text=page_text)
              offset += len(page_text)

===========changed refs (repeats)===========
(renumbered repeats of hunks quoted under row 5822: the SentenceTextSplitter.split_pages
diff, the updated textsplitter tests, the Parser(ABC) cleanup, the "ingester" loggers,
the is_key_empty removal, the verbose-parameter removals, and the UploadUserFileStrategy
class)

===========changed ref: DocumentAnalysisParser.__init__===========
# module: scripts.prepdocslib.pdfparser
  class DocumentAnalysisParser(Parser):
      def __init__(
-         self,
-         endpoint: str,
-         credential: Union[AsyncTokenCredential, AzureKeyCredential],
-         model_id="prebuilt-layout",
-         verbose: bool = False,
+         self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout"
      ):
          self.model_id = model_id
          self.endpoint = endpoint
          self.credential = credential
-         self.verbose = verbose

===========changed ref: SearchInfo.__init__===========
# module: scripts.prepdocslib.strategy
  class SearchInfo:
-     def __init__(
-         self,
-         endpoint: str,
-         credential: Union[AsyncTokenCredential, AzureKeyCredential],
-         index_name: str,
-         verbose: bool = False,
-     ):
+     def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):
          self.endpoint = endpoint
          self.credential = credential
          self.index_name = index_name
-         self.verbose = verbose

===========changed ref: OpenAIEmbeddings.__init__===========
# module: scripts.prepdocslib.embeddings
  class OpenAIEmbeddings(ABC):
+     def __init__(self, open_ai_model_name: str, disable_batch: bool = False):
-     def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):
          self.open_ai_model_name = open_ai_model_name
          self.disable_batch = disable_batch
-         self.verbose = verbose

===========changed ref: UploadUserFileStrategy.remove_file===========
# module: scripts.prepdocslib.filestrategy
+ class UploadUserFileStrategy:
+     def remove_file(self, filename: str, oid: str):
+         if filename is None or filename == "":
+             logging.warning("Filename is required to remove a file")
+             return
+         await self.search_manager.remove_content(filename, oid)

===========changed ref: ImageEmbeddings.before_retry_sleep===========
# module: scripts.prepdocslib.embeddings
  class ImageEmbeddings:
      def before_retry_sleep(self, retry_state):
-         if self.verbose:
+         logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...")
-             print("Rate limited on the Vision embeddings API, sleeping before retrying...")

===========changed ref: OpenAIEmbeddings.before_retry_sleep===========
# module: scripts.prepdocslib.embeddings
  class OpenAIEmbeddings(ABC):
      def before_retry_sleep(self, retry_state):
-         if self.verbose:
+         logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
-             print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
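Row 5823's test exercises the whole local ingestion pipeline: list files, parse PDF pages, split into sections. Assembled from the calls it makes, a sketch of the same flow outside pytest (module paths as shown in the record; post-refactor constructors without verbose):

import asyncio

from scripts.prepdocslib.listfilestrategy import LocalListFileStrategy
from scripts.prepdocslib.pdfparser import LocalPdfParser
from scripts.prepdocslib.searchmanager import Section
from scripts.prepdocslib.textsplitter import SentenceTextSplitter

async def split_local_pdfs(path_pattern: str) -> list[Section]:
    splitter = SentenceTextSplitter(has_image_embeddings=False)
    parser = LocalPdfParser()
    sections: list[Section] = []
    # list() is an async generator of File objects; parse() yields Page objects.
    async for file in LocalListFileStrategy(path_pattern=path_pattern).list():
        pages = [page async for page in parser.parse(content=file.content)]
        sections.extend(
            Section(split_page, content=file, category="example")
            for split_page in splitter.split_pages(pages)
        )
    return sections

# sections = asyncio.run(split_local_pdfs("data/*.pdf"))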
========== row 5824 ==========
path: tests.test_prepdocslib_textsplitter/test_simpletextsplitter_split_empty_pages
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<0>: + t = SimpleTextSplitter()
     - t = SimpleTextSplitter(True)

main_code:
# module: tests.test_prepdocslib_textsplitter
def test_simpletextsplitter_split_empty_pages():
<0>     t = SimpleTextSplitter(True)
<1>
<2>     assert list(t.split_pages([])) == []
<3>

context:
===========unchanged ref 0===========
at: scripts.prepdocslib.textsplitter
    SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False)
at: scripts.prepdocslib.textsplitter.SimpleTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]

===========changed refs (repeats)===========
(renumbered repeats of the hunks quoted under rows 5822-5823: the updated
textsplitter tests, the SentenceTextSplitter.split_pages diff, the "ingester"
loggers, and the verbose-parameter removals)

========== row 5825 ==========
path: tests.test_prepdocslib_textsplitter/test_simpletextsplitter_split_small_pages
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<0>: + t = SimpleTextSplitter()
     - t = SimpleTextSplitter(verbose=True)

main_code:
# module: tests.test_prepdocslib_textsplitter
def test_simpletextsplitter_split_small_pages():
<0>     t = SimpleTextSplitter(verbose=True)
<1>
<2>     split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')]))
<3>     assert len(split_pages) == 1
<4>     assert split_pages[0].page_num == 0
<5>     assert split_pages[0].text == '{"test": "Not a large page"}'

context:
===========unchanged ref 0===========
at: scripts.prepdocslib.page
    Page(page_num: int, offset: int, text: str)
at: scripts.prepdocslib.page.SplitPage.__init__
    self.page_num = page_num
    self.text = text
at: scripts.prepdocslib.textsplitter
    SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False)
at: scripts.prepdocslib.textsplitter.SimpleTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]

===========changed refs (repeats)===========
(renumbered repeats of the test and library hunks quoted in the records above)
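The SimpleTextSplitter tests in rows 5824-5825 above and 5826 below pin down its contract: the text is cut into consecutive runs of at most max_object_length characters, with page_num counting chunks from zero. A standalone re-implementation consistent with those assertions (the library's actual split_pages body is not quoted in these records):

def split_text(text: str, max_object_length: int = 1000) -> list[tuple[int, str]]:
    # One (page_num, text) pair per fixed-size chunk, mirroring SplitPage.
    return [
        (i, text[start : start + max_object_length])
        for i, start in enumerate(range(0, len(text), max_object_length))
    ]

assert split_text('{"test": "Not a large page"}', 10) == [
    (0, '{"test": "'),
    (1, "Not a larg"),
    (2, 'e page"}'),
]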
========== row 5826 ==========
path: tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_pages
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<1>: + t = SimpleTextSplitter(max_object_length=max_object_length)
     - t = SimpleTextSplitter(max_object_length=max_object_length, verbose=True)

main_code:
# module: tests.test_prepdocslib_textsplitter
def test_sentencetextsplitter_split_pages():
<0>     max_object_length = 10
<1>     t = SimpleTextSplitter(max_object_length=max_object_length, verbose=True)
<2>
<3>     split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')]))
<4>     assert len(split_pages) == 3
<5>     assert split_pages[0].page_num == 0
<6>     assert split_pages[0].text == '{"test": "'
<7>     assert len(split_pages[0].text) <= max_object_length
<8>     assert split_pages[1].page_num == 1
<9>     assert split_pages[1].text == "Not a larg"
<10>    assert len(split_pages[1].text) <= max_object_length
<11>    assert split_pages[2].page_num == 2
<12>    assert split_pages[2].text == 'e page"}'
<13>    assert len(split_pages[2].text) <= max_object_length

context:
===========unchanged ref 0===========
at: scripts.prepdocslib.page
    Page(page_num: int, offset: int, text: str)
at: scripts.prepdocslib.page.SplitPage.__init__
    self.page_num = page_num
    self.text = text
at: scripts.prepdocslib.textsplitter
    SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False)
at: scripts.prepdocslib.textsplitter.SimpleTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]

===========changed refs (repeats)===========
(renumbered repeats of hunks quoted in the records above: the updated textsplitter
tests, the Parser(ABC) cleanup, the "ingester" loggers, the is_key_empty removal,
and the verbose-parameter removals from LocalListFileStrategy, SimpleTextSplitter,
ImageEmbeddings, SearchInfo and OpenAIEmbeddings)
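The multilang test in the next record checks section sizes in tokens, not just characters. The check it performs, in isolation; the "text-embedding-ada-002" value is the ENCODING_MODEL named in that record's context, and 500 is its default max_tokens_per_section:

import tiktoken

bpe = tiktoken.encoding_for_model("text-embedding-ada-002")  # ENCODING_MODEL

def fits_token_budget(section_text: str, max_tokens_per_section: int = 500) -> bool:
    # Same measurement the test applies to every split section.
    return len(bpe.encode(section_text)) <= max_tokens_per_section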
========== row 5827 ==========
path: tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_multilang
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<0>: + text_splitter = SentenceTextSplitter(has_image_embeddings=False)
     - text_splitter = SentenceTextSplitter(False, True)
<6>: + list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"))
     - list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)

main_code:
# module: tests.test_prepdocslib_textsplitter
@pytest.mark.asyncio
async def test_sentencetextsplitter_multilang(test_doc, tmp_path):
<0>     text_splitter = SentenceTextSplitter(False, True)
<1>     bpe = tiktoken.encoding_for_model(ENCODING_MODEL)
<2>     pdf_parser = LocalPdfParser()
<3>
<4>     shutil.copy(str(test_doc.absolute()), tmp_path)
<5>
<6>     list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)
<7>     files = list_file_strategy.list()
<8>     processed = 0
<9>     async for file in files:
<10>        pages = [page async for page in pdf_parser.parse(content=file.content)]
<11>        assert pages
<12>        sections = [
<13>            Section(split_page, content=file, category="test category")
<14>            for split_page in text_splitter.split_pages(pages)
<15>        ]
<16>        assert sections
<17>        processed += 1
<18>
<19>        # Verify the size of the sections
<20>        token_lengths = []
<21>        for section in sections:
<22>            assert len(section.split_page.text) <= (text_splitter.max_section_length * 1.2)
<23>            # Verify the number of tokens is below 500
<24>            token_lengths.append((len(bpe.encode(section.split_page.text)), len(section.split_page.text)))
<25>        # verify that none of the numbers in token_lengths are above 500
<26>        assert all([tok_len <= text_splitter.max_tokens_per_section for tok_len, _ in token_lengths]), (
<27>            test_doc.name,
<28>            token_lengths,
<29>        )
<30>    assert processed == 1

context:
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)
at: scripts.prepdocslib.listfilestrategy
    LocalListFileStrategy(path_pattern: str)
at: scripts.prepdocslib.listfilestrategy.File.__init__
    self.content = content
at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy
    list() -> AsyncGenerator[File, None]
at: scripts.prepdocslib.page.SplitPage.__init__
    self.text = text
at: scripts.prepdocslib.pdfparser
    LocalPdfParser(verbose: bool=False)
at: scripts.prepdocslib.pdfparser.LocalPdfParser
    parse(content: IO) -> AsyncGenerator[Page, None]
at: scripts.prepdocslib.searchmanager
    Section(split_page: SplitPage, content: File, category: Optional[str]=None)
at: scripts.prepdocslib.searchmanager.Section.__init__
    self.split_page = split_page
at: scripts.prepdocslib.textsplitter
    ENCODING_MODEL = "text-embedding-ada-002"
    SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500)
at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]
at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__
    self.max_section_length = DEFAULT_SECTION_LENGTH
    self.max_tokens_per_section = max_tokens_per_section
at: shutil
    copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) -> _PathReturn
at: tiktoken.core.Encoding
    encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int]

===========unchanged ref 1===========
at: tiktoken.model
    encoding_for_model(model_name: str) -> Encoding

===========changed refs (repeats)===========
(renumbered repeats of hunks quoted in the records above: the LocalPdfParser.parse
logging change, the SentenceTextSplitter.split_pages diff, and the updated
textsplitter tests)
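test_split_tables, next, targets the table-aware overlap rule that appears (partly garbled by extraction) in the split_pages quotes above. A toy restatement of that rule under the reconstructed reading; treat it as an approximation, since fragments of the original lines were lost:

def next_section_start(section_text: str, start: int, end: int,
                       section_overlap: int, sentence_search_limit: int = 100) -> int:
    # If the section ends inside an unclosed <table>, restart the next section
    # at the table's opening tag instead of at the plain fixed overlap.
    last_table_start = section_text.rfind("<table")
    if last_table_start > 2 * sentence_search_limit and last_table_start > section_text.rfind("</table"):
        return min(end - section_overlap, start + last_table_start)
    return end - section_overlap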
========== row 5828 ==========
path: tests.test_prepdocslib_textsplitter/test_split_tables
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:
<0>: + t = SentenceTextSplitter(has_image_embeddings=False)
     - t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)

main_code:
# module: tests.test_prepdocslib_textsplitter
def test_split_tables():
<0>     t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)
<1>
<2>     test_text_without_table = """Contoso Electronics is a leader in the aerospace industry, providing advanced electronic
<3> components for both commercial and military aircraft. We specialize in creating cutting-
<4> edge systems that are both reliable and efficient. Our mission is to provide the highest
<5> quality aircraft components to our customers, while maintaining a commitment to safety
<6> and excellence. We are proud to have built a strong reputation in the aerospace industry
<7> and strive to continually improve our products and services. Our experienced team of
<8> engineers and technicians are dedicated to providing the best products and services to our
<9> customers. With our commitment to excellence, we are sure to remain a leader in the
<10> aerospace industry for years to come. At Contoso Electronics, we strive to ensure our employees are getting the feedback they
<11> need to continue growing and developing in their roles. We understand that performance
<12> reviews are a key part of this process and it is important to us that they are conducted in an
<13> effective and efficient manner Performance reviews are conducted annually and are an important part of your career
<14> development. During the review, your supervisor will discuss your performance over the
<15> past year and provide feedback on areas for improvement. They will also provide you with
<16> an opportunity to discuss your goals and objectives for the upcoming year.
<17>
<18> """
<19>    test_text_with_table = test_text_without_table.replace("", "")  # [replace() arguments lost in extraction; per the assertions below, they insert a <table> tag]
<20>
<21>    split_pages_with_table = list(t.split_pages(pages=[Page(page_num=0, offset=0, text=test_text_with_table)]))
<22>    split_pages_without_table = list(t.split_pages(pages=[Page(page_num=0, offset=0, text=test_text_without_table)]))

(main_code, continued, offset 1)
        assert len(split_pages_with_table) == 2

        assert split_pages_with_table[0].text != split_pages_without_table[0].text

        # The table algorithm should move the start of the second section to include the table start
        # but only in the test text that has a table tag..
        assert "<table" ...  [remainder of the assertions lost in extraction]

context:
===========unchanged ref 0===========
at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]

===========changed refs (repeats)===========
(renumbered repeats of hunks quoted in the records above: the
SentenceTextSplitter.split_pages diff, offsets 0-2, and the updated textsplitter
tests for the empty/small-page and SimpleTextSplitter cases)
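Commit d8963760, covered by the next records, deletes the has_image_embeddings early-yield at the top of split_pages. Notably, as quoted, that block yielded one SplitPage per page without returning, so control continued into the chunking loop anyway; after the deletion, a GPT-4V-enabled splitter simply chunks like the text-only path. A sketch of the resulting behavior, hedged because only the diff, not the runtime, is shown here:

# After the change, image-embedding mode no longer bypasses chunking:
splitter = SentenceTextSplitter(has_image_embeddings=True)
# Long input is sectioned by sentence boundaries and overlap, same as
# has_image_embeddings=False.
sections = list(splitter.split_pages([Page(page_num=0, offset=0, text="Some long text...")]))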
===========record: scripts.prepdocslib.textsplitter/SentenceTextSplitter.split_pages (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
 <0>: # Chunking is disabled when using GPT4V. To be updated in the future.
 <1>: if self.has_image_embeddings:
 <2>:     for i, page in enumerate(pages):
 <3>:         yield SplitPage(page_num=i, text=page.text)
 <4>:
main_code:
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
 <0>     # Chunking is disabled when using GPT4V. To be updated in the future.
 <1>     if self.has_image_embeddings:
 <2>         for i, page in enumerate(pages):
 <3>             yield SplitPage(page_num=i, text=page.text)
 <4>
 <5>     def find_page(offset):
 <6>         num_pages = len(pages)
 <7>         for i in range(num_pages - 1):
 <8>             if offset >= pages[i].offset and offset < pages[i + 1].offset:
 <9>                 return pages[i].page_num
<10>         return pages[num_pages - 1].page_num
<11>
<12>     all_text = "".join(page.text for page in pages)
<13>     if len(all_text.strip()) == 0:
<14>         return
<15>
<16>     length = len(all_text)
<17>     if length <= self.max_section_length:
<18>         yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text)
<19>         return
<20>
<21>     start = 0
<22>     end = length
<23>     while start + self.section_overlap < length:
<24>         last_word = -1
<25>         end = start + self.max_section_length
<26>
<27>         if end > length:
<28>             end = length
<29>         else:
<30>             # Try to find the end of the sentence
<31>             while (
<32>                 end < length
<33>                 and (end - start - self.max_section_length) < self.sentence_search_limit
<34>                 and all_text[end] not in self.sentence_endings
<35>             ):
<36>                 if all_text[end] in self.word_breaks:
<37>                     last_word = end
<38>                 end += 1
<39>             if end < length and all_text[end] not in self.sentence_endings and last_word > 0:
<40>                 end = last_word  # Fall back to at least keeping a whole word
context:
===========below chunk 0===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
     # offset: 1
         if end < length:
             end += 1

         # Try to find the start of the sentence or at least a whole word boundary
         last_word = -1
         while (
             start > 0
             and start > end - self.max_section_length - 2 * self.sentence_search_limit
             and all_text[start] not in self.sentence_endings
         ):
             if all_text[start] in self.word_breaks:
                 last_word = start
             start -= 1
         if all_text[start] not in self.sentence_endings and last_word > 0:
             start = last_word
         if start > 0:
             start += 1

         section_text = all_text[start:end]
         yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text)

         last_table_start = section_text.rfind("<table")
         if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"):
===========below chunk 1===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
     # offset: 2
         section_overlap < end:
             yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
===========unchanged ref 0===========
 at: scripts.prepdocslib.page
     Page(page_num: int, offset: int, text: str)
     SplitPage(page_num: int, text: str)
 at: scripts.prepdocslib.page.Page.__init__
     self.page_num = page_num
     self.offset = offset
     self.text = text
 at: scripts.prepdocslib.textsplitter
     logger = logging.getLogger("ingester")
 at: scripts.prepdocslib.textsplitter.SentenceTextSplitter
     split_page_by_max_tokens(page_num: int, text: str) -> Generator[SplitPage, None, None]
 at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__
     self.sentence_endings = STANDARD_SENTENCE_ENDINGS + CJK_SENTENCE_ENDINGS
     self.word_breaks = STANDARD_WORD_BREAKS + CJK_WORD_BREAKS
     self.max_section_length = DEFAULT_SECTION_LENGTH
     self.sentence_search_limit = 100
     self.section_overlap = self.max_section_length // DEFAULT_OVERLAP_PERCENT
     self.has_image_embeddings = has_image_embeddings
 at: scripts.prepdocslib.textsplitter.TextSplitter
     split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]
 at: typing
     List = _alias(list, 1, inst=False, name='List')
     Generator = _alias(collections.abc.Generator, 3)
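===========example: before/after control flow of the GPT4V change (illustrative sketch)===========
The ground truth above simply deletes the early per-page yield. A minimal sketch of the behavioral difference, using a hypothetical chunk() helper in place of the real sentence-boundary path:

from dataclasses import dataclass

@dataclass
class SplitPage:
    page_num: int
    text: str

def chunk(texts, size=500):
    # Hypothetical stand-in for the sentence-boundary chunking path.
    for i, text in enumerate(texts):
        for j in range(0, len(text), size):
            yield SplitPage(page_num=i, text=text[j:j + size])

def split_pages(texts, has_image_embeddings=False):
    # After the commit the flag no longer short-circuits: documents are
    # chunked the same way whether or not image embeddings are in use.
    yield from chunk(texts)

Note also that, as shown in main_code above, the removed block did not return after its loop, so the old code could fall through into the chunking path as well; removing the block makes the single path explicit.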
===========record: scripts.prepdocslib.htmlparser/LocalHTMLParser.parse (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
 <7>: logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
      logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
main_code:
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
 <0>     """Parses the given content.
 <1>     To learn more, please visit https://pypi.org/project/beautifulsoup4/
 <2>     Args:
 <3>         content (IO): The content to parse.
 <4>     Returns:
 <5>         Page: The parsed html Page.
 <6>     """
 <7>     logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
 <8>
 <9>     data = content.read()
<10>     soup = BeautifulSoup(data, "html.parser")
<11>
<12>     # Get text only from html file
<13>     result = soup.get_text()
<14>
<15>     yield Page(0, 0, text=cleanup_data(result))
<16>
context:
===========changed ref 0===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
-        # Chunking is disabled when using GPT4V. To be updated in the future.
-        if self.has_image_embeddings:
-            for i, page in enumerate(pages):
-                yield SplitPage(page_num=i, text=page.text)
-
         def find_page(offset):
             num_pages = len(pages)
             for i in range(num_pages - 1):
                 if offset >= pages[i].offset and offset < pages[i + 1].offset:
                     return pages[i].page_num
             return pages[num_pages - 1].page_num
 
         all_text = "".join(page.text for page in pages)
         if len(all_text.strip()) == 0:
             return
 
         length = len(all_text)
         if length <= self.max_section_length:
             yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text)
             return
 
         start = 0
         end = length
         while start + self.section_overlap < length:
             last_word = -1
             end = start + self.max_section_length
 
             if end > length:
                 end = length
             else:
                 # Try to find the end of the sentence
                 while (
                     end < length
                     and (end - start - self.max_section_length) < self.sentence_search_limit
                     and all_text[end] not in self.sentence_endings
                 ):
                     if all_text[end] in self.word_breaks:
                         last_word = end
                     end += 1
                 if end < length and all_text[end] not in self.sentence_endings and last_word > 0:
                     end = last_word  # Fall back to at least keeping a whole word
                 if end < length:
                     end += 1
 
             # Try to find the start of the sentence
===========changed ref 1===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
     # offset: 1
         at least keeping a whole word
         if end < length:
             end += 1
 
         # Try to find the start of the sentence or at least a whole word boundary
         last_word = -1
         while (
             start > 0
             and start > end - self.max_section_length - 2 * self.sentence_search_limit
             and all_text[start] not in self.sentence_endings
         ):
             if all_text[start] in self.word_breaks:
                 last_word = start
             start -= 1
         if all_text[start] not in self.sentence_endings and last_word > 0:
             start = last_word
         if start > 0:
             start += 1
 
         section_text = all_text[start:end]
         yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text)
 
         last_table_start = section_text.rfind("<table")
         if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"):
===========changed ref 2===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
     # offset: 2
 
         if start + self.section_overlap < end:
             yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
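===========example: HTML-to-text extraction (illustrative sketch)===========
A minimal sketch of the same BeautifulSoup pattern the record above modifies. html_to_text is a hypothetical helper; the real code also normalizes whitespace via cleanup_data.

import logging

from bs4 import BeautifulSoup

logger = logging.getLogger("ingester")

def html_to_text(path: str) -> str:
    # "html.parser" is the stdlib-backed parser, so no extra dependency is needed.
    with open(path, encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    # get_text() drops all tags and returns only the visible text.
    text = soup.get_text()
    # Lazy %-style logging, matching the ground truth above: the message is
    # only formatted when the INFO level is actually enabled.
    logger.info("Extracted %d characters from '%s'", len(text), path)
    return text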
===========record: scripts.prepdocslib.listfilestrategy/LocalListFileStrategy.check_md5 (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
<14>: logger.info("Skipping %s, no changes detected.", path)
      logger.info(f"Skipping {path}, no changes detected.")
main_code:
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
     def check_md5(self, path: str) -> bool:
 <0>     # if filename ends in .md5 skip
 <1>     if path.endswith(".md5"):
 <2>         return True
 <3>
 <4>     # if there is a file called .md5 in this directory, see if its updated
 <5>     stored_hash = None
 <6>     with open(path, "rb") as file:
 <7>         existing_hash = hashlib.md5(file.read()).hexdigest()
 <8>     hash_path = f"{path}.md5"
 <9>     if os.path.exists(hash_path):
<10>         with open(hash_path, encoding="utf-8") as md5_f:
<11>             stored_hash = md5_f.read()
<12>
<13>     if stored_hash and stored_hash.strip() == existing_hash.strip():
<14>         logger.info(f"Skipping {path}, no changes detected.")
<15>         return True
<16>
<17>     # Write the hash
<18>     with open(hash_path, "w", encoding="utf-8") as md5_f:
<19>         md5_f.write(existing_hash)
<20>
<21>     return False
context:
===========unchanged ref 0===========
 at: hashlib
     md5(string: ReadableBuffer=...) -> _Hash
 at: hashlib._Hash
     digest_size: int
     block_size: int
     name: str
     hexdigest() -> str
 at: io.BufferedRandom
     read(self, size: Optional[int]=..., /) -> bytes
 at: io.BufferedReader
     read(self, size: Optional[int]=..., /) -> bytes
 at: io.FileIO
     write(self, b: ReadableBuffer, /) -> int
 at: logging.Logger
     info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
 at: os.path
     exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
 at: scripts.prepdocslib.listfilestrategy
     logger = logging.getLogger("ingester")
 at: typing.BinaryIO
     __slots__ = ()
     write(s: AnyStr) -> int
 at: typing.IO
     __slots__ = ()
     read(n: int=...) -> AnyStr
     write(s: AnyStr) -> int
===========changed ref 0===========
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
         """Parses the given content.
         To learn more, please visit https://pypi.org/project/beautifulsoup4/
         Args:
             content (IO): The content to parse.
         Returns:
             Page: The parsed html Page.
         """
+        logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
-        logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
 
         data = content.read()
         soup = BeautifulSoup(data, "html.parser")
 
         # Get text only from html file
         result = soup.get_text()
 
         yield Page(0, 0, text=cleanup_data(result))
===========changed ref 1===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
-        # Chunking is disabled when using GPT4V. To be updated in the future.
-        if self.has_image_embeddings:
-            for i, page in enumerate(pages):
-                yield SplitPage(page_num=i, text=page.text)
-
         def find_page(offset):
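===========example: MD5 sidecar change detection (illustrative sketch)===========
A self-contained sketch of the sidecar-hash pattern check_md5 implements: hash the file, compare against a stored .md5 file next to it, and refresh the stored hash on change. The function name is hypothetical.

import hashlib
import os

def is_unchanged(path: str) -> bool:
    with open(path, "rb") as f:
        current = hashlib.md5(f.read()).hexdigest()
    sidecar = f"{path}.md5"
    if os.path.exists(sidecar):
        with open(sidecar, encoding="utf-8") as f:
            if f.read().strip() == current:
                return True  # caller can skip re-ingesting this file
    # Record the new hash so the next run sees the file as unchanged.
    with open(sidecar, "w", encoding="utf-8") as f:
        f.write(current)
    return False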
===========record: scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_batch (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
<13>: logger.info(
          "Computed embeddings in batch. Batch size: %d, Token count: %d",
          len(batch.texts),
          batch.token_length,
      )
      logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
main_code:
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:
 <0>     batches = self.split_text_into_batches(texts)
 <1>     embeddings = []
 <2>     client = await self.create_client()
 <3>     for batch in batches:
 <4>         async for attempt in AsyncRetrying(
 <5>             retry=retry_if_exception_type(RateLimitError),
 <6>             wait=wait_random_exponential(min=15, max=60),
 <7>             stop=stop_after_attempt(15),
 <8>             before_sleep=self.before_retry_sleep,
 <9>         ):
<10>             with attempt:
<11>                 emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)
<12>                 embeddings.extend([data.embedding for data in emb_response.data])
<13>                 logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
<14>
<15>     return embeddings
context:
===========changed ref 0===========
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
+        logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
-        logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
===========changed ref 1===========
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
     def check_md5(self, path: str) -> bool:
         # if filename ends in .md5 skip
         if path.endswith(".md5"):
             return True
 
         # if there is a file called .md5 in this directory, see if its updated
         stored_hash = None
         with open(path, "rb") as file:
             existing_hash = hashlib.md5(file.read()).hexdigest()
         hash_path = f"{path}.md5"
         if os.path.exists(hash_path):
             with open(hash_path, encoding="utf-8") as md5_f:
                 stored_hash = md5_f.read()
 
         if stored_hash and stored_hash.strip() == existing_hash.strip():
+            logger.info("Skipping %s, no changes detected.", path)
-            logger.info(f"Skipping {path}, no changes detected.")
             return True
 
         # Write the hash
         with open(hash_path, "w", encoding="utf-8") as md5_f:
             md5_f.write(existing_hash)
 
         return False
===========changed ref 2===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
-        # Chunking is disabled when using GPT4V. To be updated in the future.
-        if self.has_image_embeddings:
-            for i, page in enumerate(pages):
-                yield SplitPage(page_num=i, text=page.text)
-
         def find_page(offset):
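===========example: retry with exponential backoff (illustrative sketch)===========
The retry policy configured in the main_code above, extracted into a self-contained sketch. RateLimitError here is a local stand-in; the real code catches openai.RateLimitError.

from tenacity import AsyncRetrying, retry_if_exception_type, stop_after_attempt, wait_random_exponential

class RateLimitError(Exception):  # stand-in for openai.RateLimitError
    pass

async def call_with_retry(fn):
    # Retry only on rate limiting, sleeping a random exponential interval
    # between 15 and 60 seconds, and give up after 15 attempts, the same
    # policy the record above configures.
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(RateLimitError),
        wait=wait_random_exponential(min=15, max=60),
        stop=stop_after_attempt(15),
    ):
        with attempt:
            return await fn()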
===========record: scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_single (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
 <9>: logger.info("Computed embedding for text section. Character count: %d", len(text))
main_code:
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_single(self, text: str) -> List[float]:
 <0>     client = await self.create_client()
 <1>     async for attempt in AsyncRetrying(
 <2>         retry=retry_if_exception_type(RateLimitError),
 <3>         wait=wait_random_exponential(min=15, max=60),
 <4>         stop=stop_after_attempt(15),
 <5>         before_sleep=self.before_retry_sleep,
 <6>     ):
 <7>         with attempt:
 <8>             emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)
 <9>
<10>     return emb_response.data[0].embedding
context:
===========changed ref 0===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:
         batches = self.split_text_into_batches(texts)
         embeddings = []
         client = await self.create_client()
         for batch in batches:
             async for attempt in AsyncRetrying(
                 retry=retry_if_exception_type(RateLimitError),
                 wait=wait_random_exponential(min=15, max=60),
                 stop=stop_after_attempt(15),
                 before_sleep=self.before_retry_sleep,
             ):
                 with attempt:
                     emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)
                     embeddings.extend([data.embedding for data in emb_response.data])
+                    logger.info(
+                        "Computed embeddings in batch. Batch size: %d, Token count: %d",
+                        len(batch.texts),
+                        batch.token_length,
+                    )
-                    logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
 
         return embeddings
===========changed ref 1===========
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
+        logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
-        logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
===========changed ref 2===========
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
     def check_md5(self, path: str) -> bool:
         if stored_hash and stored_hash.strip() == existing_hash.strip():
+            logger.info("Skipping %s, no changes detected.", path)
-            logger.info(f"Skipping {path}, no changes detected.")
             return True
===========changed ref 3===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
-        # Chunking is disabled when using GPT4V. To be updated in the future.
-        if self.has_image_embeddings:
-            for i, page in enumerate(pages):
-                yield SplitPage(page_num=i, text=page.text)
-
         def find_page(offset):
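===========example: lazy %-style logging (illustrative sketch)===========
Most ground truths in this dump apply the same refactor: f-strings in logger calls become %-style calls with separate arguments. A short sketch of why:

import logging

logger = logging.getLogger("ingester")
count = 1536

# Eager: the f-string is formatted even when INFO logging is disabled.
logger.info(f"Computed embedding for text section. Character count: {count}")

# Lazy: formatting is deferred until a handler actually needs the message,
# so disabled log levels cost almost nothing.
logger.info("Computed embedding for text section. Character count: %d", count)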
===========record: scripts.prepdocslib.blobmanager/BlobManager.upload_blob (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
 <9>: logger.info("Uploading blob for whole file -> %s", blob_name)
      logger.info(f"\tUploading blob for whole file -> {blob_name}")
<12>: if self.store_page_images:
          if os.path.splitext(file.content.name)[1].lower() == ".pdf":
      if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf":
<13>: return await self.upload_pdf_blob_images(service_client, container_client, file)
      return await self.upload_pdf_blob_images(service_client, container_client, file)
<14>: else:
          logger.info("File %s is not a PDF, skipping image upload", file.content.name)
main_code:
 # module: scripts.prepdocslib.blobmanager
 class BlobManager:
     def upload_blob(self, file: File) -> Optional[List[str]]:
 <0>     async with BlobServiceClient(
 <1>         account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024
 <2>     ) as service_client, service_client.get_container_client(self.container) as container_client:
 <3>         if not await container_client.exists():
 <4>             await container_client.create_container()
 <5>
 <6>         # Re-open and upload the original file
 <7>         with open(file.content.name, "rb") as reopened_file:
 <8>             blob_name = BlobManager.blob_name_from_file_name(file.content.name)
 <9>             logger.info(f"\tUploading blob for whole file -> {blob_name}")
<10>             await container_client.upload_blob(blob_name, reopened_file, overwrite=True)
<11>
<12>         if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf":
<13>             return await self.upload_pdf_blob_images(service_client, container_client, file)
<14>
<15>     return None
context:
===========changed ref 0===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_single(self, text: str) -> List[float]:
         client = await self.create_client()
         async for attempt in AsyncRetrying(
             retry=retry_if_exception_type(RateLimitError),
             wait=wait_random_exponential(min=15, max=60),
             stop=stop_after_attempt(15),
             before_sleep=self.before_retry_sleep,
         ):
             with attempt:
                 emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)
+        logger.info("Computed embedding for text section. Character count: %d", len(text))
 
         return emb_response.data[0].embedding
===========changed ref 1===========
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
+        logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
-        logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
===========changed ref 2===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:
             embeddings.extend([data.embedding for data in emb_response.data])
+            logger.info(
+                "Computed embeddings in batch. Batch size: %d, Token count: %d",
+                len(batch.texts),
+                batch.token_length,
+            )
-            logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
 
         return embeddings
===========changed ref 3===========
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
     def check_md5(self, path: str) -> bool:
         if stored_hash and stored_hash.strip() == existing_hash.strip():
+            logger.info("Skipping %s, no changes detected.", path)
-            logger.info(f"Skipping {path}, no changes detected.")
             return True
===========changed ref 4===========
 # module: scripts.prepdocslib.textsplitter
 class SentenceTextSplitter(TextSplitter):
     def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]:
-        # Chunking is disabled when using GPT4V. To be updated in the future.
-        if self.has_image_embeddings:
-            for i, page in enumerate(pages):
-                yield SplitPage(page_num=i, text=page.text)
-
         def find_page(offset):
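===========example: splitting a compound guard to log the skip reason (illustrative sketch)===========
The <12>-<14> ground truth above restructures a compound condition into nested ifs so the skipped case can be logged. A hedged, self-contained sketch of the same pattern; maybe_upload_images is a hypothetical stand-in for the real flow:

import logging
import os

logger = logging.getLogger("ingester")

def maybe_upload_images(store_page_images: bool, filename: str) -> bool:
    # With one compound condition, a skipped non-PDF file is silent.
    # Splitting the checks gives the else branch a place to report it.
    if store_page_images:
        if os.path.splitext(filename)[1].lower() == ".pdf":
            return True  # caller proceeds with the per-page image upload
        logger.info("File %s is not a PDF, skipping image upload", filename)
    return False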
===========record: scripts.prepdocslib.blobmanager/BlobManager.upload_pdf_blob_images (Modified)===========
project: Azure-Samples~azure-search-openai-demo
commit: d896376069d3350dc733638ae86e811beba53d9f ("Don't disable text chunking when GPT4vision is enabled (#1355)")
ground_truth:
<15>: logger.info("Unable to find arial.ttf or FreeMono.ttf, using default font")
      logger.info("\tUnable to find arial.ttf or FreeMono.ttf, using default font")
<19>: logger.info("Converting page %s to image and uploading -> %s", i, blob_name)
      logger.info(f"\tConverting page {i} to image and uploading -> {blob_name}")
main_code:
 # module: scripts.prepdocslib.blobmanager
 class BlobManager:
     def upload_pdf_blob_images(
         self, service_client: BlobServiceClient, container_client: ContainerClient, file: File
     ) -> List[str]:
 <0>     with open(file.content.name, "rb") as reopened_file:
 <1>         reader = PdfReader(reopened_file)
 <2>         page_count = len(reader.pages)
 <3>     doc = fitz.open(file.content.name)
 <4>     sas_uris = []
 <5>     start_time = datetime.datetime.now(datetime.timezone.utc)
 <6>     expiry_time = start_time + datetime.timedelta(days=1)
 <7>
 <8>     font = None
 <9>     try:
<10>         font = ImageFont.truetype("arial.ttf", 20)
<11>     except OSError:
<12>         try:
<13>             font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20)
<14>         except OSError:
<15>             logger.info("\tUnable to find arial.ttf or FreeMono.ttf, using default font")
<16>
<17>     for i in range(page_count):
<18>         blob_name = BlobManager.blob_image_name_from_file_page(file.content.name, i)
<19>         logger.info(f"\tConverting page {i} to image and uploading -> {blob_name}")
<20>
<21>         doc = fitz.open(file.content.name)
<22>         page = doc.load_page(i)
<23>         pix = page.get_pixmap()
<24>         original_img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)  # type: ignore
<25>
<26>         # Create a new image with additional space for text
<27>         text_height = 40  # Height of the text area
<28>         new_img = Image.new("RGB", (original_img.width, original_img.height + text_height), "white")
<29>
<30>         # Paste the original image onto the new image
<31>         new_img.paste(
context:
===========below chunk 0===========
 # module: scripts.prepdocslib.blobmanager
 class BlobManager:
     def upload_pdf_blob_images(
         self, service_client: BlobServiceClient, container_client: ContainerClient, file: File
     ) -> List[str]:
     # offset: 1
         # Draw the text on the white area
         draw = ImageDraw.Draw(new_img)
         text = f"SourceFileName:{blob_name}"
 
         # 10 pixels from the top and left of the image
         x = 10
         y = 10
         draw.text((x, y), text, font=font, fill="black")
 
         output = io.BytesIO()
         new_img.save(output, format="PNG")
         output.seek(0)
 
         blob_client = await container_client.upload_blob(blob_name, output, overwrite=True)
         if not self.user_delegation_key:
             self.user_delegation_key = await service_client.get_user_delegation_key(start_time, expiry_time)
 
         if blob_client.account_name is not None:
             sas_token = generate_blob_sas(
                 account_name=blob_client.account_name,
                 container_name=blob_client.container_name,
                 blob_name=blob_client.blob_name,
                 user_delegation_key=self.user_delegation_key,
                 permission=BlobSasPermissions(read=True),
                 expiry=expiry_time,
                 start=start_time,
             )
             sas_uris.append(f"{blob_client.url}?{sas_token}")
 
         return sas_uris
===========changed ref 0===========
 # module: scripts.prepdocslib.blobmanager
 class BlobManager:
     def upload_blob(self, file: File) -> Optional[List[str]]:
         async with BlobServiceClient(
             account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024
         ) as service_client, service_client.get_container_client(self.container) as container_client:
             if not await container_client.exists():
                 await container_client.create_container()
 
             # Re-open and upload the original file
             with open(file.content.name, "rb") as reopened_file:
                 blob_name = BlobManager.blob_name_from_file_name(file.content.name)
+                logger.info("Uploading blob for whole file -> %s", blob_name)
-                logger.info(f"\tUploading blob for whole file -> {blob_name}")
                 await container_client.upload_blob(blob_name, reopened_file, overwrite=True)
 
+            if self.store_page_images:
+                if os.path.splitext(file.content.name)[1].lower() == ".pdf":
-            if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf":
+                    return await self.upload_pdf_blob_images(service_client, container_client, file)
-                return await self.upload_pdf_blob_images(service_client, container_client, file)
+                else:
+                    logger.info("File %s is not a PDF, skipping image upload", file.content.name)
 
         return None
===========changed ref 1===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_single(self, text: str) -> List[float]:
         with attempt:
             emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)
+        logger.info("Computed embedding for text section. Character count: %d", len(text))
 
         return emb_response.data[0].embedding
===========changed ref 2===========
 # module: scripts.prepdocslib.htmlparser
 class LocalHTMLParser(Parser):
     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
+        logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name)
-        logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
===========changed ref 3===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:
             embeddings.extend([data.embedding for data in emb_response.data])
+            logger.info(
+                "Computed embeddings in batch. Batch size: %d, Token count: %d",
+                len(batch.texts),
+                batch.token_length,
+            )
-            logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
 
         return embeddings
===========changed ref 4===========
 # module: scripts.prepdocslib.listfilestrategy
 class LocalListFileStrategy(ListFileStrategy):
     def check_md5(self, path: str) -> bool:
         if stored_hash and stored_hash.strip() == existing_hash.strip():
+            logger.info("Skipping %s, no changes detected.", path)
-            logger.info(f"Skipping {path}, no changes detected.")
             return True
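===========example: rendering a PDF page to PNG (illustrative sketch)===========
A minimal sketch of the rasterization pipeline the record above uses before uploading: PyMuPDF renders the page, Pillow re-encodes it. render_page_to_png is a hypothetical helper.

import io

import fitz  # PyMuPDF
from PIL import Image

def render_page_to_png(pdf_path: str, page_num: int) -> bytes:
    doc = fitz.open(pdf_path)
    page = doc.load_page(page_num)
    pix = page.get_pixmap()  # raster at the default resolution
    # Wrap the raw pixel buffer in a PIL image so it can be saved as PNG.
    img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()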
logger.info(f\"\\tUploading blob for whole file -> {blob_name}\")\n await container_client.upload_blob(blob_name, reopened_file, overwrite=True)\n \n + if self.store_page_images:\n + if os.path.splitext(file.content.name)[1].lower() == \".pdf\":\n - if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == \".pdf\":\n + return await self.upload_pdf_blob_images(service_client, container_client, file)\n - return await self.upload_pdf_blob_images(service_client, container_client, file)\n + else:\n + logger.info(\"File %s is not a PDF, skipping image upload\", file.content.name)\n \n return None\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def upload_pdf_blob_images(\n self, service_client: BlobServiceClient, container_client: ContainerClient, file: File\n ) -> List[str]:\n with open(file.content.name, \"rb\") as reopened_file:\n reader = PdfReader(reopened_file)\n page_count = len(reader.pages)\n doc = fitz.open(file.content.name)\n sas_uris = []\n start_time = datetime.datetime.now(datetime.timezone.utc)\n expiry_time = start_time + datetime.timedelta(days=1)\n \n font = None\n try:\n font = ImageFont.truetype(\"arial.ttf\", 20)\n except OSError:\n try:\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", 20)\n except OSError:\n + logger.info(\"Unable to find arial.ttf or FreeMono.ttf, using default font\")\n - logger.info(\"\\tUnable to find arial.ttf or FreeMono.ttf, using default font\")\n \n for i in range(page_count):\n blob_name = BlobManager.blob_image_name_from_file_page(file.content.name, i)\n + logger.info(\"Converting page %s to image and uploading -> %s\", i, blob_name)\n - logger.info(f\"\\tConverting page {i} to image and uploading -> {blob_name}\")\n \n doc = fitz.open(file.content.name)\n page = doc.load_page(i)\n pix = page.get_pixmap()\n original_img = Image.frombytes(\"RGB\", [pix.width, pix.height], pix.samples) # type: ignore\n \n # Create a new image with additional space for text\n text_height = 40 # Height of the text area\n new_img = Image.new(\"RGB\", (original_img.width, original_img.height + text_height), \"\n===========changed ref 2===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def upload_pdf_blob_images(\n self, service_client: BlobServiceClient, container_client: ContainerClient, file: File\n ) -> List[str]:\n # offset: 1\n \n new_img = Image.new(\"RGB\", (original_img.width, original_img.height + text_height), \"white\")\n \n # Paste the original image onto the new image\n new_img.paste(original_img, (0, text_height))\n \n # Draw the text on the white area\n draw = ImageDraw.Draw(new_img)\n text = f\"SourceFileName:{blob_name}\"\n \n # 10 pixels from the top and left of the image\n x = 10\n y = 10\n draw.text((x, y), text, font=font, fill=\"black\")\n \n output = io.BytesIO()\n new_img.save(output, format=\"PNG\")\n output.seek(0)\n \n blob_client = await container_client.upload_blob(blob_name, output, overwrite=True)\n if not self.user_delegation_key:\n self.user_delegation_key = await service_client.get_user_delegation_key(start_time, expiry_time)\n \n if blob_client.account_name is not None:\n sas_token = generate_blob_sas(\n account_name=blob_client.account_name,\n container_name=blob_client.container_name,\n blob_name=blob_client.blob_name,\n user_delegation_key=self.user_delegation_key,\n permission=BlobSasPermissions(read=True),\n expiry=expiry_time,\n start=start_time,\n )\n 
sas_uris.append(f\"{blob_client.url}?{sas_token}\")\n \n return sas_uris\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n "}}},{"rowIdx":5837,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.searchmanager/SearchManager.create_index"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <0>: logger.info(\"Ensuring search index %s exists\", self.search_info.index_name)\n logger.info(f\"Ensuring search index {self.search_info.index_name} exists\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n <0> logger.info(f\"Ensuring search index {self.search_info.index_name} exists\")\n <1> \n <2> async with self.search_info.create_search_index_client() as search_index_client:\n <3> fields = [\n <4> (\n <5> SimpleField(name=\"id\", type=\"Edm.String\", key=True)\n <6> if not self.use_int_vectorization\n <7> else SearchField(\n <8> name=\"id\",\n <9> type=\"Edm.String\",\n<10> key=True,\n<11> sortable=True,\n<12> filterable=True,\n<13> facetable=True,\n<14> analyzer_name=\"keyword\",\n<15> )\n<16> ),\n<17> SearchableField(\n<18> name=\"content\",\n<19> type=\"Edm.String\",\n<20> analyzer_name=self.search_analyzer_name,\n<21> ),\n<22> SearchField(\n<23> name=\"embedding\",\n<24> type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n<25> hidden=False,\n<26> searchable=True,\n<27> filterable=False,\n<28> sortable=False,\n<29> facetable=False,\n<30> vector_search_dimensions=1536,\n<31> vector_search_profile_name=\"embedding_config\",\n<32> ),\n<33> SimpleField(name=\"category\", type=\"Edm.String\", filterable=True, facetable=True),\n<34> SimpleField(\n<35> name=\"sourcepage\",\n<36> type=\"Edm.String\",\n<37> filterable=True,\n<38> facetable=True,\n<39> ),\n<40> SimpleField(\n<41> 
name=\"sourcefile\",\n<42> type=\"Edm.String\",\n<43> filterable=True,\n<44> facetable=True,\n<45> ),\n<46> ]\n<47> if self.use_acls:\n<48> fields.append(\n<49> SimpleField(\n<50> name=\"oids\",\n<51> type=Search"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 1\n filterable=True,\n )\n )\n fields.append(\n SimpleField(\n name=\"groups\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n if self.use_int_vectorization:\n fields.append(SearchableField(name=\"parent_id\", type=\"Edm.String\", filterable=True))\n if self.search_images:\n fields.append(\n SearchField(\n name=\"imageEmbedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1024,\n vector_search_profile_name=\"embedding_config\",\n ),\n )\n \n index = SearchIndex(\n name=self.search_info.index_name,\n fields=fields,\n semantic_search=SemanticSearch(\n configurations=[\n SemanticConfiguration(\n name=\"default\",\n prioritized_fields=SemanticPrioritizedFields(\n title_field=None, content_fields=[SemanticField(field_name=\"content\")]\n ),\n )\n ]\n ),\n vector_search=VectorSearch(\n algorithms=[\n HnswAlgorithmConfiguration(\n name=\"hnsw_config\",\n parameters=HnswParameters(metric=\"cosine\"),\n )\n ],\n profiles=[\n VectorSearchProfile(\n name=\"embedding_config\",\n algorithm_configuration_name=\"hnsw_config\",\n vectorizer=(\n f\"{self.search_info.index_name}-vectorizer\" if self.use_int_vectorization else None\n ),\n ),\n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_\n===========below chunk 1===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 2\n \n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:\n logger.info(f\"Creating {self.search_info.index_name} search index\")\n await search_index_client.create_index(index)\n else:\n logger.info(f\"Search index {self.search_info.index_name} already exists\")\n \n \n===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.searchmanager\n logger = logging.getLogger(\"ingester\")\n \n at: scripts.prepdocslib.searchmanager.SearchManager.__init__\n self.search_info = search_info\n \n self.search_analyzer_name = search_analyzer_name\n \n self.use_acls = use_acls\n \n self.use_int_vectorization = use_int_vectorization\n \n self.search_images = search_images\n \n at: scripts.prepdocslib.strategy.SearchInfo\n create_search_index_client() -> SearchIndexClient\n \n at: scripts.prepdocslib.strategy.SearchInfo.__init__\n self.index_name = index_name\n \n at: typing\n List = _alias(list, 1, inst=False, name='List')\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n 
wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n "}}},{"rowIdx":5838,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.searchmanager/SearchManager.remove_content"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <0>: logger.info(\n \"Removing sections from '{%s or ''}' from search index '%s'\", path, self.search_info.index_name\n logger.info(f\"Removing sections from '{path or ''}' from search index '{self.search_info.index_name}'\")\n <1>: )\n<13>: logger.info(\"Removed %d sections from index\", len(removed_docs))\n logger.info(f\"\\tRemoved {len(removed_docs)} sections from index\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def remove_content(self, path: Optional[str] = None, only_oid: Optional[str] = None):\n <0> logger.info(f\"Removing sections from '{path or ''}' from search index '{self.search_info.index_name}'\")\n <1> async with self.search_info.create_search_client() as search_client:\n <2> while True:\n <3> filter = None if path is None else f\"sourcefile eq '{os.path.basename(path)}'\"\n <4> result = await search_client.search(\"\", filter=filter, top=1000, include_total_count=True)\n <5> if await result.get_count() == 0:\n <6> break\n <7> documents_to_remove = []\n <8> async for document in result:\n <9> # If only_oid is set, only remove documents that have only this oid\n<10> if not only_oid or document[\"oids\"] == [only_oid]:\n<11> documents_to_remove.append({\"id\": document[\"id\"]})\n<12> removed_docs = await search_client.delete_documents(documents_to_remove)\n<13> logger.info(f\"\\tRemoved {len(removed_docs)} sections from index\")\n<14> # It can take a few seconds for search results to reflect changes, so wait a bit\n<15> await asyncio.sleep(2)\n<16> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: os.path\n basename(p: _PathLike[AnyStr]) -> AnyStr\n basename(p: AnyStr) -> AnyStr\n \n at: scripts.prepdocslib.searchmanager\n logger = logging.getLogger(\"ingester\")\n \n at: scripts.prepdocslib.searchmanager.SearchManager.__init__\n 
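===========example: declaring a vector field (illustrative sketch)===========
The vector field from the main_code above, isolated into a runnable snippet. The profile name is only meaningful if a matching entry exists under VectorSearch(profiles=...); 1536 is the dimension of text-embedding-ada-002 vectors.

from azure.search.documents.indexes.models import SearchField, SearchFieldDataType

embedding_field = SearchField(
    name="embedding",
    type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
    hidden=False,        # keep the stored vectors retrievable
    searchable=True,     # enable vector queries against this field
    filterable=False,
    sortable=False,
    facetable=False,
    vector_search_dimensions=1536,
    vector_search_profile_name="embedding_config",
)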
self.search_info = search_info\n \n at: scripts.prepdocslib.strategy.SearchInfo\n create_search_client() -> SearchClient\n \n at: scripts.prepdocslib.strategy.SearchInfo.__init__\n self.index_name = index_name\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n + logger.info(\"Ensuring search index %s exists\", self.search_info.index_name)\n - logger.info(f\"Ensuring search index {self.search_info.index_name} exists\")\n \n async with self.search_info.create_search_index_client() as search_index_client:\n fields = [\n (\n SimpleField(name=\"id\", type=\"Edm.String\", key=True)\n if not self.use_int_vectorization\n else SearchField(\n name=\"id\",\n type=\"Edm.String\",\n key=True,\n sortable=True,\n filterable=True,\n facetable=True,\n analyzer_name=\"keyword\",\n )\n ),\n SearchableField(\n name=\"content\",\n type=\"Edm.String\",\n analyzer_name=self.search_analyzer_name,\n ),\n SearchField(\n name=\"embedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1536,\n vector_search_profile_name=\"embedding_config\",\n ),\n SimpleField(name=\"category\", type=\"Edm.String\", filterable=True, facetable=True),\n SimpleField(\n name=\"sourcepage\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n SimpleField(\n name=\"sourcefile\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n ]\n if self.use_acls:\n fields.append(\n SimpleField(\n name=\"oids\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 1\n \n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n fields.append(\n SimpleField(\n name=\"groups\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n if self.use_int_vectorization:\n fields.append(SearchableField(name=\"parent_id\", type=\"Edm.String\", filterable=True))\n if self.search_images:\n fields.append(\n SearchField(\n name=\"imageEmbedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1024,\n vector_search_profile_name=\"embedding_config\",\n ),\n )\n \n index = SearchIndex(\n name=self.search_info.index_name,\n fields=fields,\n semantic_search=SemanticSearch(\n configurations=[\n SemanticConfiguration(\n name=\"default\",\n prioritized_fields=SemanticPrioritizedFields(\n title_field=None, content_fields=[SemanticField(field_name=\"content\")]\n ),\n )\n ]\n ),\n vector_search=VectorSearch(\n algorithms=[\n HnswAlgorithmConfiguration(\n name=\"hnsw_config\",\n parameters=HnswParameters(metric=\"cosine\"),\n )\n ],\n profiles=[\n VectorSearchProfile(\n name=\"embedding_config\",\n algorithm_configuration_name=\"hnsw_config\",\n vectorizer=(\n f\"{self.search_info.index_name}-vectorizer\n===========changed ref 2===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 2\n 
self.use_int_vectorization else None\n ),\n ),\n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:\n + logger.info(\"Creating %s search index\", self.search_info.index_name)\n - logger.info(f\"Creating {self.search_info.index_name} search index\")\n await search_index_client.create_index(index)\n else:\n + logger.info(\"Search index %s already exists\", self.search_info.index_name)\n - logger.info(f\"Search index {self.search_info.index_name} already exists\")\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n "}}},{"rowIdx":5839,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.pdfparser/LocalPdfParser.parse"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <0>: logger.info(\"Extracting text from '%s' using local PDF parser (pypdf)\", content.name)\n logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n <0> logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n <1> \n <2> reader = PdfReader(content)\n <3> pages = reader.pages\n <4> offset = 0\n <5> for page_num, p in enumerate(pages):\n <6> page_text = p.extract_text()\n <7> yield Page(page_num=page_num, offset=offset, text=page_text)\n <8> offset += len(page_text)\n <9> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.page\n Page(page_num: int, offset: int, text: str)\n \n at: scripts.prepdocslib.parser.Parser\n 
parse(self, content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.pdfparser\n logger = logging.getLogger(\"ingester\")\n \n at: typing\n AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\n \n IO()\n \n at: typing.IO\n __slots__ = ()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path is not None and blob_path == os.path.basename(path)):\n continue\n + logger.info(\"Removing blob %s\", blob_path)\n - logger.info(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def remove_content(self, path: Optional[str] = None, only_oid: Optional[str] = None):\n + logger.info(\n + \"Removing sections from '{%s or ''}' from search index '%s'\", path, self.search_info.index_name\n - logger.info(f\"Removing sections from '{path or ''}' from search index '{self.search_info.index_name}'\")\n + )\n async with self.search_info.create_search_client() as search_client:\n while True:\n filter = None if path is None else f\"sourcefile eq '{os.path.basename(path)}'\"\n result = await search_client.search(\"\", filter=filter, top=1000, include_total_count=True)\n if await result.get_count() == 0:\n break\n documents_to_remove = []\n async for document in result:\n # If only_oid is set, only remove documents that have only this oid\n if not only_oid or document[\"oids\"] == [only_oid]:\n documents_to_remove.append({\"id\": document[\"id\"]})\n removed_docs = await search_client.delete_documents(documents_to_remove)\n + logger.info(\"Removed %d sections from index\", len(removed_docs))\n - logger.info(f\"\\tRemoved {len(removed_docs)} sections from index\")\n # It can take a few seconds for search results to reflect changes, so wait a bit\n await asyncio.sleep(2)\n 
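
The rows above and below apply one mechanical edit again and again: each logger.info(f"...") call becomes a %-style template with the values passed as separate arguments. The snippet below is a self-contained illustration of why that style is preferred, not code from the repository; the logger name matches the diffs, but the variable "removed" and its value are invented for the demo.

    import logging

    logging.basicConfig(level=logging.WARNING)  # INFO records are filtered out
    logger = logging.getLogger("ingester")
    removed = 42  # illustrative value only

    # '-' style: the f-string is formatted eagerly, even though this
    # record is then discarded by the WARNING-level threshold.
    logger.info(f"Removed {removed} sections from index")

    # '+' style: logging stores the template and arguments and only
    # interpolates them if a handler actually emits the record.
    logger.info("Removed %d sections from index", removed)

Deferring interpolation also keeps the message template constant, which makes records easier to group in log aggregation tools.
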
"}}},{"rowIdx":5840,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.pdfparser/DocumentAnalysisParser.parse"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <0>: logger.info(\"Extracting text from '%s' using Azure Document Intelligence\", content.name)\n logger.info(f\"Extracting text from '{content.name}' using Azure Document Intelligence\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n <0> logger.info(f\"Extracting text from '{content.name}' using Azure Document Intelligence\")\n <1> \n <2> async with DocumentIntelligenceClient(\n <3> endpoint=self.endpoint, credential=self.credential\n <4> ) as document_intelligence_client:\n <5> poller = await document_intelligence_client.begin_analyze_document(\n <6> model_id=self.model_id, analyze_request=content, content_type=\"application/octet-stream\"\n <7> )\n <8> form_recognizer_results = await poller.result()\n <9> \n<10> offset = 0\n<11> for page_num, page in enumerate(form_recognizer_results.pages):\n<12> tables_on_page = [\n<13> table\n<14> for table in (form_recognizer_results.tables or [])\n<15> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1\n<16> ]\n<17> \n<18> # mark all positions of the table spans in the page\n<19> page_offset = page.spans[0].offset\n<20> page_length = page.spans[0].length\n<21> table_chars = [-1] * page_length\n<22> for table_id, table in enumerate(tables_on_page):\n<23> for span in table.spans:\n<24> # replace all table spans with \"table_id\" in table_chars array\n<25> for i in range(span.length):\n<26> idx = span.offset - page_offset + i\n<27> if idx >= 0 and idx < page_length:\n<28> table_chars[idx] = table_id\n<29> \n<30> # build page text by replacing characters in table spans with table html\n<31> page_text = \"\"\n<32> added_tables = set()\n<33> for idx, table_id in enumerate(table_chars):\n<34> if table_id == -1:\n<35> page_text += form_recognizer_results.content[page_"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.pdfparser\n class DocumentAnalysisParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n # offset: 1\n elif table_id not in added_tables:\n page_text += DocumentAnalysisParser.table_to_html(tables_on_page[table_id])\n added_tables.add(table_id)\n \n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n \n===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.page\n Page(page_num: int, offset: int, text: str)\n \n at: scripts.prepdocslib.parser.Parser\n parse(self, content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.pdfparser\n logger = logging.getLogger(\"ingester\")\n \n DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id=\"prebuilt-layout\")\n \n at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser\n table_to_html(table: DocumentTable)\n 
\n at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser.__init__\n self.model_id = model_id\n \n self.endpoint = endpoint\n \n self.credential = credential\n \n at: typing\n AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\n \n IO()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n + logger.info(\"Extracting text from '%s' using local PDF parser (pypdf)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path is not None and blob_path == os.path.basename(path)):\n continue\n + logger.info(\"Removing blob %s\", blob_path)\n - logger.info(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n "}}},{"rowIdx":5841,"cells":{"path":{"kind":"string","value":"scripts.prepdocs/setup_list_file_strategy"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <5>: logger.info(\"Using Data Lake Gen2 Storage Account: %s\", datalake_storage_account)\n logger.info(f\"Using Data Lake Gen2 Storage Account {datalake_storage_account}\")\n<13>: logger.info(\"Using local files: %s\", local_files)\n logger.info(f\"Using local files in {local_files}\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocs\n def setup_list_file_strategy(\n azure_credential: AsyncTokenCredential,\n local_files: Union[str, None],\n datalake_storage_account: Union[str, None],\n datalake_filesystem: Union[str, None],\n datalake_path: Union[str, None],\n datalake_key: Union[str, None],\n ):\n <0> list_file_strategy: ListFileStrategy\n <1> if datalake_storage_account:\n <2> if datalake_filesystem is None or datalake_path is None:\n <3> raise ValueError(\"DataLake file system and path are required when using Azure Data Lake Gen2\")\n <4> adls_gen2_creds: Union[AsyncTokenCredential, str] = 
azure_credential if datalake_key is None else datalake_key\n <5> logger.info(f\"Using Data Lake Gen2 Storage Account {datalake_storage_account}\")\n <6> list_file_strategy = ADLSGen2ListFileStrategy(\n <7> data_lake_storage_account=datalake_storage_account,\n <8> data_lake_filesystem=datalake_filesystem,\n <9> data_lake_path=datalake_path,\n<10> credential=adls_gen2_creds,\n<11> )\n<12> elif local_files:\n<13> logger.info(f\"Using local files in {local_files}\")\n<14> list_file_strategy = LocalListFileStrategy(path_pattern=local_files)\n<15> else:\n<16> raise ValueError(\"Either local_files or datalake_storage_account must be provided.\")\n<17> return list_file_strategy\n<18> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: prepdocslib.listfilestrategy\n ListFileStrategy()\n \n LocalListFileStrategy(path_pattern: str)\n \n ADLSGen2ListFileStrategy(data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str])\n \n at: scripts.prepdocs\n logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 0===========\n + # module: tests.test_fetch_image\n + \n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n + logger.info(\"Extracting text from '%s' using local PDF parser (pypdf)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. 
Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n 
and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path is not None and blob_path == os.path.basename(path)):\n continue\n + logger.info(\"Removing blob %s\", blob_path)\n - logger.info(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n "}}},{"rowIdx":5842,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.filestrategy/parse_file"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <3>: logger.info(\"Skipping '%s', no parser found.\", file.filename())\n logger.info(f\"Skipping '{file.filename()}', no parser found.\")\n <5>: logger.info(\"Ingesting '%s'\", file.filename())\n logger.info(f\"Parsing '{file.filename()}'\")\n <7>: logger.info(\"Splitting '%s' into sections\", file.filename())\n if image_embeddings:\n logger.warning(\"Each page will be split into smaller chunks of text, but images will be of the entire page.\")\n logger.info(f\"Splitting '{file.filename()}' into sections\")\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.filestrategy\n def parse_file(\n + file: File,\n + file_processors: dict[str, FileProcessor],\n + category: Optional[str] = None,\n + image_embeddings: Optional[ImageEmbeddings] = None,\n - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None\n ) -> List[Section]:\n <0> key = file.file_extension()\n <1> processor = file_processors.get(key)\n <2> if processor is None:\n <3> logger.info(f\"Skipping '{file.filename()}', no parser found.\")\n <4> return []\n <5> logger.info(f\"Parsing '{file.filename()}'\")\n <6> pages = [page async for page in processor.parser.parse(content=file.content)]\n <7> logger.info(f\"Splitting '{file.filename()}' into sections\")\n <8> sections = [\n <9> Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages)\n<10> ]\n<11> return sections\n<12> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.embeddings\n ImageEmbeddings(endpoint: str, token_provider: Callable[[], Awaitable[str]])\n \n at: scripts.prepdocslib.fileprocessor\n FileProcessor(parser: Parser, splitter: TextSplitter)\n \n at: scripts.prepdocslib.fileprocessor.FileProcessor\n parser: Parser\n \n splitter: TextSplitter\n \n at: scripts.prepdocslib.filestrategy\n logger = logging.getLogger(\"ingester\")\n \n at: scripts.prepdocslib.listfilestrategy\n File(content: IO, acls: Optional[dict[str, list]]=None)\n \n at: scripts.prepdocslib.listfilestrategy.File\n filename()\n \n file_extension()\n \n at: scripts.prepdocslib.listfilestrategy.File.__init__\n self.content = content\n \n at: scripts.prepdocslib.parser.Parser\n parse(content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.searchmanager\n Section(split_page: SplitPage, content: File, category: Optional[str]=None)\n \n at: typing\n List = _alias(list, 1, inst=False, name='List')\n \n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n 
get(key: _KT) -> Optional[_VT_co]\n \n \n===========changed ref 0===========\n + # module: tests.test_fetch_image\n + \n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n + logger.info(\"Extracting text from '%s' using local PDF parser (pypdf)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path is not None and blob_path == os.path.basename(path)):\n continue\n + logger.info(\"Removing blob %s\", blob_path)\n - logger.info(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n "}}},{"rowIdx":5843,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.filestrategy/FileStrategy.run"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <7>: sections = await parse_file(file, self.file_processors, self.category, self.image_embeddings)\n sections = await parse_file(file, self.file_processors, self.category)\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.filestrategy\n class FileStrategy(Strategy):\n def run(self):\n <0> search_manager = SearchManager(\n <1> self.search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings\n <2> )\n <3> if self.document_action == DocumentAction.Add:\n <4> files = self.list_file_strategy.list()\n <5> async for file in files:\n <6> try:\n <7> sections = await parse_file(file, self.file_processors, self.category)\n <8> if sections:\n <9> blob_sas_uris = await self.blob_manager.upload_blob(file)\n<10> blob_image_embeddings: Optional[List[List[float]]] = None\n<11> if self.image_embeddings and blob_sas_uris:\n<12> blob_image_embeddings = await 
self.image_embeddings.create_embeddings(blob_sas_uris)\n<13> await search_manager.update_content(sections, blob_image_embeddings)\n<14> finally:\n<15> if file:\n<16> file.close()\n<17> elif self.document_action == DocumentAction.Remove:\n<18> paths = self.list_file_strategy.list_paths()\n<19> async for path in paths:\n<20> await self.blob_manager.remove_blob(path)\n<21> await search_manager.remove_content(path)\n<22> elif self.document_action == DocumentAction.RemoveAll:\n<23> await self.blob_manager.remove_blob()\n<24> await search_manager.remove_content()\n<25> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: scripts.prepdocslib.blobmanager.BlobManager\n upload_blob(file: File) -> Optional[List[str]]\n \n at: scripts.prepdocslib.embeddings.ImageEmbeddings\n create_embeddings(blob_urls: List[str]) -> List[List[float]]\n \n at: scripts.prepdocslib.filestrategy\n parse_file(file: File, file_processors: dict[str, FileProcessor], category: Optional[str]=None, image_embeddings: Optional[ImageEmbeddings]=None) -> List[Section]\n \n at: scripts.prepdocslib.filestrategy.FileStrategy.__init__\n self.list_file_strategy = list_file_strategy\n \n self.blob_manager = blob_manager\n \n self.file_processors = file_processors\n \n self.document_action = document_action\n \n self.embeddings = embeddings\n \n self.image_embeddings = image_embeddings\n \n self.search_analyzer_name = search_analyzer_name\n \n self.search_info = search_info\n \n self.use_acls = use_acls\n \n self.category = category\n \n at: scripts.prepdocslib.filestrategy.FileStrategy.setup\n search_manager = SearchManager(\n self.search_info,\n self.search_analyzer_name,\n self.use_acls,\n False,\n self.embeddings,\n search_images=self.image_embeddings is not None,\n )\n \n at: scripts.prepdocslib.listfilestrategy.ListFileStrategy\n list() -> AsyncGenerator[File, None]\n \n list_paths() -> AsyncGenerator[str, None]\n \n at: scripts.prepdocslib.searchmanager\n SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False)\n \n \n===========unchanged ref 1===========\n at: scripts.prepdocslib.searchmanager.SearchManager\n create_index(vectorizers: Optional[List[VectorSearchVectorizer]]=None)\n \n update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None)\n \n at: scripts.prepdocslib.strategy\n DocumentAction()\n \n at: scripts.prepdocslib.strategy.Strategy\n run(self)\n \n at: typing\n List = _alias(list, 1, inst=False, name='List')\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.filestrategy\n def parse_file(\n + file: File,\n + file_processors: dict[str, FileProcessor],\n + category: Optional[str] = None,\n + image_embeddings: Optional[ImageEmbeddings] = None,\n - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None\n ) -> List[Section]:\n key = file.file_extension()\n processor = file_processors.get(key)\n if processor is None:\n + logger.info(\"Skipping '%s', no parser found.\", file.filename())\n - logger.info(f\"Skipping '{file.filename()}', no parser found.\")\n return []\n + logger.info(\"Ingesting '%s'\", file.filename())\n - logger.info(f\"Parsing '{file.filename()}'\")\n pages = [page async for page in processor.parser.parse(content=file.content)]\n + logger.info(\"Splitting '%s' into sections\", file.filename())\n + if image_embeddings:\n + 
logger.warning(\"Each page will be split into smaller chunks of text, but images will be of the entire page.\")\n - logger.info(f\"Splitting '{file.filename()}' into sections\")\n sections = [\n Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages)\n ]\n return sections\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def upload_blob(self, file: File) -> Optional[List[str]]:\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n await container_client.create_container()\n \n # Re-open and upload the original file\n with open(file.content.name, \"rb\") as reopened_file:\n blob_name = BlobManager.blob_name_from_file_name(file.content.name)\n + logger.info(\"Uploading blob for whole file -> %s\", blob_name)\n - logger.info(f\"\\tUploading blob for whole file -> {blob_name}\")\n await container_client.upload_blob(blob_name, reopened_file, overwrite=True)\n \n + if self.store_page_images:\n + if os.path.splitext(file.content.name)[1].lower() == \".pdf\":\n - if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == \".pdf\":\n + return await self.upload_pdf_blob_images(service_client, container_client, file)\n - return await self.upload_pdf_blob_images(service_client, container_client, file)\n + else:\n + logger.info(\"File %s is not a PDF, skipping image upload\", file.content.name)\n \n return None\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n + logger.info(\"Ensuring search index %s exists\", self.search_info.index_name)\n - logger.info(f\"Ensuring search index {self.search_info.index_name} exists\")\n \n async with self.search_info.create_search_index_client() as search_index_client:\n fields = [\n (\n SimpleField(name=\"id\", type=\"Edm.String\", key=True)\n if not self.use_int_vectorization\n else SearchField(\n name=\"id\",\n type=\"Edm.String\",\n key=True,\n sortable=True,\n filterable=True,\n facetable=True,\n analyzer_name=\"keyword\",\n )\n ),\n SearchableField(\n name=\"content\",\n type=\"Edm.String\",\n analyzer_name=self.search_analyzer_name,\n ),\n SearchField(\n name=\"embedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1536,\n vector_search_profile_name=\"embedding_config\",\n ),\n SimpleField(name=\"category\", type=\"Edm.String\", filterable=True, facetable=True),\n SimpleField(\n name=\"sourcepage\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n SimpleField(\n name=\"sourcefile\",\n type=\"Edm.String\",\n filterable=True,\n facetable=True,\n ),\n ]\n if self.use_acls:\n fields.append(\n SimpleField(\n name=\"oids\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n 
"}}},{"rowIdx":5844,"cells":{"path":{"kind":"string","value":"app.backend.core.imageshelper/download_blob_as_base64"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"d896376069d3350dc733638ae86e811beba53d9f"},"commit_message":{"kind":"string","value":"Don't disable text chunking when GPT4vision is enabled (#1355)"},"ground_truth":{"kind":"string","value":" <1>: image_filename = base_name + \".png\"\n try:\n blob = await blob_container_client.get_blob_client(image_filename).download_blob()\n blob = await blob_container_client.get_blob_client(base_name + \".png\").download_blob()\n <2>: if not blob.properties:\n logging.warning(f\"No blob exists for {image_filename}\")\n return None\n img = base64.b64encode(await blob.readall()).decode(\"utf-8\")\n return f\"data:image/png;base64,{img}\"\n except ResourceNotFoundError:\n logging.warning(f\"No blob exists for {image_filename}\")\n return None\n <3>: if not blob.properties:\n <4>: return None\n <5>: img = base64.b64encode(await blob.readall()).decode(\"utf-8\")\n <6>: return f\"data:image/png;base64,{img}\"\n <7>: \n"},"main_code":{"kind":"string","value":" # module: app.backend.core.imageshelper\n def download_blob_as_base64(blob_container_client: ContainerClient, file_path: str) -> Optional[str]:\n <0> base_name, _ = os.path.splitext(file_path)\n <1> blob = await blob_container_client.get_blob_client(base_name + \".png\").download_blob()\n <2> \n <3> if not blob.properties:\n <4> return None\n <5> img = base64.b64encode(await blob.readall()).decode(\"utf-8\")\n <6> return f\"data:image/png;base64,{img}\"\n <7> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: os.path\n splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr]\n splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr]\n \n \n===========changed ref 0===========\n + # module: tests.test_fetch_image\n + \n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.pdfparser\n class LocalPdfParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n + logger.info(\"Extracting text from '%s' using local PDF parser (pypdf)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local PDF parser (pypdf)\")\n \n reader = PdfReader(content)\n pages = reader.pages\n offset = 0\n for page_num, p in enumerate(pages):\n page_text = p.extract_text()\n yield Page(page_num=page_num, offset=offset, text=page_text)\n offset += len(page_text)\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n + logger.info(\"Computed embedding for text section. 
Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.htmlparser\n class LocalHTMLParser(Parser):\n def parse(self, content: IO) -> AsyncGenerator[Page, None]:\n \"\"\"Parses the given content.\n To learn more, please visit https://pypi.org/project/beautifulsoup4/\n Args:\n content (IO): The content to parse.\n Returns:\n Page: The parsed html Page.\n \"\"\"\n + logger.info(\"Extracting text from '%s' using local HTML parser (BeautifulSoup)\", content.name)\n - logger.info(f\"\\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)\")\n \n data = content.read()\n soup = BeautifulSoup(data, \"html.parser\")\n \n # Get text only from html file\n result = soup.get_text()\n \n yield Page(0, 0, text=cleanup_data(result))\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.filestrategy\n def parse_file(\n + file: File,\n + file_processors: dict[str, FileProcessor],\n + category: Optional[str] = None,\n + image_embeddings: Optional[ImageEmbeddings] = None,\n - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None\n ) -> List[Section]:\n key = file.file_extension()\n processor = file_processors.get(key)\n if processor is None:\n + logger.info(\"Skipping '%s', no parser found.\", file.filename())\n - logger.info(f\"Skipping '{file.filename()}', no parser found.\")\n return []\n + logger.info(\"Ingesting '%s'\", file.filename())\n - logger.info(f\"Parsing '{file.filename()}'\")\n pages = [page async for page in processor.parser.parse(content=file.content)]\n + logger.info(\"Splitting '%s' into sections\", file.filename())\n + if image_embeddings:\n + logger.warning(\"Each page will be split into smaller chunks of text, but images will be of the entire page.\")\n - logger.info(f\"Splitting '{file.filename()}' into sections\")\n sections = [\n Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages)\n ]\n return sections\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n + logger.info(\n + \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n + len(batch.texts),\n + batch.token_length,\n + )\n - logger.info(f\"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}\")\n \n return embeddings\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.listfilestrategy\n class LocalListFileStrategy(ListFileStrategy):\n def check_md5(self, path: str) -> bool:\n # if filename ends in .md5 skip\n if path.endswith(\".md5\"):\n return True\n \n # if there is a file called .md5 in this directory, see if its updated\n stored_hash = None\n with open(path, \"rb\") as file:\n existing_hash = hashlib.md5(file.read()).hexdigest()\n hash_path = f\"{path}.md5\"\n if os.path.exists(hash_path):\n with open(hash_path, encoding=\"utf-8\") as md5_f:\n stored_hash = md5_f.read()\n \n if stored_hash and stored_hash.strip() == existing_hash.strip():\n + logger.info(\"Skipping %s, no changes detected.\", path)\n - logger.info(f\"Skipping {path}, no changes detected.\")\n return True\n \n # Write the hash\n with open(hash_path, \"w\", encoding=\"utf-8\") as md5_f:\n md5_f.write(existing_hash)\n \n return False\n \n===========changed ref 7===========\n # module: scripts.prepdocslib.blobmanager\n class BlobManager:\n def remove_blob(self, path: Optional[str] = None):\n async with BlobServiceClient(\n account_url=self.endpoint, credential=self.credential\n ) as service_client, service_client.get_container_client(self.container) as container_client:\n if not await container_client.exists():\n return\n if path is None:\n prefix = None\n blobs = container_client.list_blob_names()\n else:\n prefix = os.path.splitext(os.path.basename(path))[0]\n blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])\n async for blob_path in blobs:\n # This still supports PDFs split into individual pages, but we could remove in future to simplify code\n if (\n prefix is not None\n and (\n not re.match(rf\"{prefix}-\\d+\\.pdf\", blob_path) or not re.match(rf\"{prefix}-\\d+\\.png\", blob_path)\n )\n ) or (path is not None and blob_path == os.path.basename(path)):\n continue\n + logger.info(\"Removing blob %s\", blob_path)\n - logger.info(f\"\\tRemoving blob {blob_path}\")\n await container_client.delete_blob(blob_path)\n "}}},{"rowIdx":5845,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.textsplitter/SentenceTextSplitter.split_page_by_max_tokens"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a45774a90d8557b90f89f06be7e888b8ee476bb8"},"commit_message":{"kind":"string","value":"Ensure there are no zero-length sections for batch API (#1423)"},"ground_truth":{"kind":"string","value":"<30>: middle = int(len(text) // 2)\n overlap = int(len(text) * (DEFAULT_OVERLAP_PERCENT / 100))\n first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))]\n<31>: first_half = text[: middle + overlap]\n second_half = text[middle - overlap :]\n second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :]\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]:\n <0> \"\"\"\n <1> Recursively splits page by maximum number of tokens to better handle languages with higher token/word ratios.\n <2> \"\"\"\n <3> tokens = bpe.encode(text)\n <4> if len(tokens) <= self.max_tokens_per_section:\n <5> # Section is already within max tokens, return\n <6> yield 
SplitPage(page_num=page_num, text=text)\n <7> else:\n <8> # Start from the center and try and find the closest sentence ending by spiralling outward.\n <9> # IF we get to the outer thirds, then just split in half with a 5% overlap\n<10> start = int(len(text) // 2)\n<11> pos = 0\n<12> boundary = int(len(text) // 3)\n<13> split_position = -1\n<14> while start - pos > boundary:\n<15> if text[start - pos] in self.sentence_endings:\n<16> split_position = start - pos\n<17> break\n<18> elif text[start + pos] in self.sentence_endings:\n<19> split_position = start + pos\n<20> break\n<21> else:\n<22> pos += 1\n<23> \n<24> if split_position > 0:\n<25> first_half = text[: split_position + 1]\n<26> second_half = text[split_position + 1 :]\n<27> else:\n<28> # Split page in half and call function again\n<29> # Overlap first and second halves by DEFAULT_OVERLAP_PERCENT%\n<30> first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))]\n<31> second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :]\n<32> yield from self.split_page_by_max_tokens(page_num, first_half)\n<33> yield from self."},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]:\n # offset: 1\n \n \n===========unchanged ref 0===========\n at: scripts.prepdocslib.page\n SplitPage(page_num: int, text: str)\n \n at: scripts.prepdocslib.textsplitter\n bpe = tiktoken.encoding_for_model(ENCODING_MODEL)\n \n DEFAULT_OVERLAP_PERCENT = 10 # See semantic search article for 10% overlap performance\n \n at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__\n self.sentence_endings = STANDARD_SENTENCE_ENDINGS + CJK_SENTENCE_ENDINGS\n \n self.max_tokens_per_section = max_tokens_per_section\n \n at: tiktoken.core.Encoding\n encode(text: str, *, allowed_special: Union[Literal[\"all\"], AbstractSet[str]]=set(), disallowed_special: Union[Literal[\"all\"], Collection[str]]=\"all\") -> list[int]\n \n at: typing\n Generator = _alias(collections.abc.Generator, 3)\n \n "}}},{"rowIdx":5846,"cells":{"path":{"kind":"string","value":"tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_multilang"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a45774a90d8557b90f89f06be7e888b8ee476bb8"},"commit_message":{"kind":"string","value":"Ensure there are no zero-length sections for batch API (#1423)"},"ground_truth":{"kind":"string","value":"<22>: assert section.split_page.text != \"\"\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocslib_textsplitter\n @pytest.mark.asyncio\n async def test_sentencetextsplitter_multilang(test_doc, tmp_path):\n <0> text_splitter = SentenceTextSplitter(has_image_embeddings=False)\n <1> bpe = tiktoken.encoding_for_model(ENCODING_MODEL)\n <2> pdf_parser = LocalPdfParser()\n <3> \n <4> shutil.copy(str(test_doc.absolute()), tmp_path)\n <5> \n <6> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / \"*\"))\n <7> files = list_file_strategy.list()\n <8> processed = 0\n <9> async for file in files:\n<10> pages = [page async for page in pdf_parser.parse(content=file.content)]\n<11> assert pages\n<12> sections = [\n<13> Section(split_page, content=file, category=\"test category\")\n<14> for split_page in 
text_splitter.split_pages(pages)\n<15> ]\n<16> assert sections\n<17> processed += 1\n<18> \n<19> # Verify the size of the sections\n<20> token_lengths = []\n<21> for section in sections:\n<22> assert len(section.split_page.text) <= (text_splitter.max_section_length * 1.2)\n<23> # Verify the number of tokens is below 500\n<24> token_lengths.append((len(bpe.encode(section.split_page.text)), len(section.split_page.text)))\n<25> # verify that none of the numbers in token_lengths are above 500\n<26> assert all([tok_len <= text_splitter.max_tokens_per_section for tok_len, _ in token_lengths]), (\n<27> test_doc.name,\n<28> token_lengths,\n<29> )\n<30> assert processed == 1\n<31> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: scripts.prepdocslib.listfilestrategy\n LocalListFileStrategy(path_pattern: str)\n \n at: scripts.prepdocslib.listfilestrategy.File.__init__\n self.content = content\n \n at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy\n list() -> AsyncGenerator[File, None]\n \n at: scripts.prepdocslib.page.SplitPage.__init__\n self.text = text\n \n at: scripts.prepdocslib.pdfparser\n LocalPdfParser()\n \n at: scripts.prepdocslib.pdfparser.LocalPdfParser\n parse(content: IO) -> AsyncGenerator[Page, None]\n \n at: scripts.prepdocslib.searchmanager\n Section(split_page: SplitPage, content: File, category: Optional[str]=None)\n \n at: scripts.prepdocslib.searchmanager.Section.__init__\n self.split_page = split_page\n \n at: scripts.prepdocslib.textsplitter\n ENCODING_MODEL = \"text-embedding-ada-002\"\n \n SentenceTextSplitter(has_image_embeddings: bool, max_tokens_per_section: int=500)\n \n at: scripts.prepdocslib.textsplitter.SentenceTextSplitter\n split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]\n \n at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__\n self.max_section_length = DEFAULT_SECTION_LENGTH\n \n self.max_tokens_per_section = max_tokens_per_section\n \n at: shutil\n copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) 
-> _PathReturn\n \n at: tiktoken.core.Encoding\n encode(text: str, *, allowed_special: Union[Literal[\"all\"], AbstractSet[str]]=set(), disallowed_special: Union[Literal[\"all\"], Collection[str]]=\"all\") -> list[int]\n \n \n===========unchanged ref 1===========\n at: tiktoken.model\n encoding_for_model(model_name: str) -> Encoding\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]:\n \"\"\"\n Recursively splits page by maximum number of tokens to better handle languages with higher token/word ratios.\n \"\"\"\n tokens = bpe.encode(text)\n if len(tokens) <= self.max_tokens_per_section:\n # Section is already within max tokens, return\n yield SplitPage(page_num=page_num, text=text)\n else:\n # Start from the center and try and find the closest sentence ending by spiralling outward.\n # IF we get to the outer thirds, then just split in half with a 5% overlap\n start = int(len(text) // 2)\n pos = 0\n boundary = int(len(text) // 3)\n split_position = -1\n while start - pos > boundary:\n if text[start - pos] in self.sentence_endings:\n split_position = start - pos\n break\n elif text[start + pos] in self.sentence_endings:\n split_position = start + pos\n break\n else:\n pos += 1\n \n if split_position > 0:\n first_half = text[: split_position + 1]\n second_half = text[split_position + 1 :]\n else:\n # Split page in half and call function again\n # Overlap first and second halves by DEFAULT_OVERLAP_PERCENT%\n + middle = int(len(text) // 2)\n + overlap = int(len(text) * (DEFAULT_OVERLAP_PERCENT / 100))\n - first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))]\n + first_half = text[: middle + overlap]\n + second_half = text[middle - overlap :]\n - second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100\n===========changed ref 1===========\n # module: scripts.prepdocslib.textsplitter\n class SentenceTextSplitter(TextSplitter):\n def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]:\n # offset: 1\n second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :]\n yield from self.split_page_by_max_tokens(page_num, first_half)\n yield from self.split_page_by_max_tokens(page_num, second_half)\n "}}},{"rowIdx":5847,"cells":{"path":{"kind":"string","value":"app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"ccf2494f3eadfae02126c2ec6b3be74d38e83618"},"commit_message":{"kind":"string","value":"Add minimum score criteria for AI search results (#1417)"},"ground_truth":{"kind":"string","value":"<12>: minimum_search_score = overrides.get(\"minimum_search_score\", 0.0)\n minimum_reranker_score = overrides.get(\"minimum_reranker_score\", 0.0)\n"},"main_code":{"kind":"string","value":" # module: app.backend.approaches.retrievethenreadvision\n class RetrieveThenReadVisionApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n <0> q = messages[-1][\"content\"]\n <1> overrides = context.get(\"overrides\", {})\n <2> auth_claims = 
context.get(\"auth_claims\", {})\n <3> has_text = overrides.get(\"retrieval_mode\") in [\"text\", \"hybrid\", None]\n <4> has_vector = overrides.get(\"retrieval_mode\") in [\"vectors\", \"hybrid\", None]\n <5> vector_fields = overrides.get(\"vector_fields\", [\"embedding\"])\n <6> \n <7> include_gtpV_text = overrides.get(\"gpt4v_input\") in [\"textAndImages\", \"texts\", None]\n <8> include_gtpV_images = overrides.get(\"gpt4v_input\") in [\"textAndImages\", \"images\", None]\n <9> \n<10> use_semantic_captions = True if overrides.get(\"semantic_captions\") and has_text else False\n<11> top = overrides.get(\"top\", 3)\n<12> filter = self.build_filter(overrides, auth_claims)\n<13> use_semantic_ranker = overrides.get(\"semantic_ranker\") and has_text\n<14> \n<15> # If retrieval mode includes vectors, compute an embedding for the query\n<16> \n<17> vectors = []\n<18> if has_vector:\n<19> for field in vector_fields:\n<20> vector = (\n<21> await self.compute_text_embedding(q)\n<22> if field == \"embedding\"\n<23> else await self.compute_image_embedding(q)\n<24> )\n<25> vectors.append(vector)\n<26> \n<27> # Only keep the text query if the retrieval mode uses text, otherwise drop it\n<28> query_text = q if has_text else"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: app.backend.approaches.retrievethenreadvision\n class RetrieveThenReadVisionApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n # offset: 1\n \n results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions)\n \n image_list: list[ChatCompletionContentPartImageParam] = []\n user_content: list[ChatCompletionContentPartParam] = [{\"text\": q, \"type\": \"text\"}]\n \n template = overrides.get(\"prompt_template\", self.system_chat_template_gpt4v)\n model = self.gpt4v_model\n message_builder = MessageBuilder(template, model)\n \n # Process results\n \n sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True)\n \n if include_gtpV_text:\n content = \"\\n\".join(sources_content)\n user_content.append({\"text\": content, \"type\": \"text\"})\n if include_gtpV_images:\n for result in results:\n url = await fetch_image(self.blob_container_client, result)\n if url:\n image_list.append({\"image_url\": url, \"type\": \"image_url\"})\n user_content.extend(image_list)\n \n # Append user message\n message_builder.insert_message(\"user\", user_content)\n updated_messages = message_builder.messages\n chat_completion = (\n await self.openai_client.chat.completions.create(\n model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model,\n messages=updated_messages,\n temperature=\n===========below chunk 1===========\n # module: app.backend.approaches.retrievethenreadvision\n class RetrieveThenReadVisionApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n # offset: 2\n self.gpt4v_deployment else self.gpt4v_model,\n messages=updated_messages,\n temperature=overrides.get(\"temperature\", 0.3),\n max_tokens=1024,\n n=1,\n )\n ).model_dump()\n \n data_points = {\n \"text\": sources_content,\n \"images\": [d[\"image_url\"] for d 
in image_list],\n }\n \n extra_info = {\n \"data_points\": data_points,\n \"thoughts\": [\n ThoughtStep(\n \"Search using user query\",\n query_text,\n {\n \"use_semantic_captions\": use_semantic_captions,\n \"use_semantic_ranker\": use_semantic_ranker,\n \"top\": top,\n \"filter\": filter,\n \"vector_fields\": vector_fields,\n },\n ),\n ThoughtStep(\n \"Search results\",\n [result.serialize_for_results() for result in results],\n ),\n ThoughtStep(\n \"Prompt to generate answer\",\n [str(message) for message in updated_messages],\n (\n {\"model\": self.gpt4v_model, \"deployment\": self.gpt4v_deployment}\n if self.gpt4v_deployment\n else {\"model\": self.gpt4v_model}\n ),\n ),\n ],\n }\n chat_\n===========below chunk 2===========\n # module: app.backend.approaches.retrievethenreadvision\n class RetrieveThenReadVisionApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n # offset: 3\n choices\"][0][\"context\"] = extra_info\n chat_completion[\"choices\"][0][\"session_state\"] = session_state\n return chat_completion\n \n \n===========unchanged ref 0===========\n at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach\n system_chat_template_gpt4v = (\n \"You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. \"\n + \"Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName: \"\n + \"Each text source starts in a new line and has the file name followed by colon and the actual information \"\n + \"Always include the source name from the image or text for each fact you use in the response in the format: [filename] \"\n + \"Answer the following question using only the data provided in the sources below. \"\n + \"For tabular information return it as an html table. Do not return markdown format. \"\n + \"The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned \"\n + \"If you cannot answer using the sources below, say you don't know. 
Return just the answer without any input texts \"\n )\n \n at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__\n self.blob_container_client = blob_container_client\n \n self.openai_client = openai_client\n \n self.gpt4v_deployment = gpt4v_deployment\n \n self.gpt4v_model = gpt4v_model\n \n at: approaches.approach\n ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None)\n \n at: approaches.approach.Approach\n build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]\n \n "}}},{"rowIdx":5848,"cells":{"path":{"kind":"string","value":"app.backend.approaches.approach/Approach.search"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"ccf2494f3eadfae02126c2ec6b3be74d38e83618"},"commit_message":{"kind":"string","value":"Add minimum score criteria for AI search results (#1417)"},"ground_truth":{"kind":"string","value":""},"main_code":{"kind":"string","value":" # module: app.backend.approaches.approach\n class Approach(ABC):\n def search(\n self,\n top: int,\n query_text: Optional[str],\n filter: Optional[str],\n vectors: List[VectorQuery],\n use_semantic_ranker: bool,\n use_semantic_captions: bool,\n + minimum_search_score: Optional[float],\n + minimum_reranker_score: Optional[float],\n ) -> List[Document]:\n <0> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)\n <1> if use_semantic_ranker and query_text:\n <2> results = await self.search_client.search(\n <3> search_text=query_text,\n <4> filter=filter,\n <5> query_type=QueryType.SEMANTIC,\n <6> query_language=self.query_language,\n <7> query_speller=self.query_speller,\n <8> semantic_configuration_name=\"default\",\n <9> top=top,\n<10> query_caption=\"extractive|highlight-false\" if use_semantic_captions else None,\n<11> vector_queries=vectors,\n<12> )\n<13> else:\n<14> results = await self.search_client.search(\n<15> search_text=query_text or \"\", filter=filter, top=top, vector_queries=vectors\n<16> )\n<17> \n<18> documents = []\n<19> async for page in results.by_page():\n<20> async for document in page:\n<21> documents.append(\n<22> Document(\n<23> id=document.get(\"id\"),\n<24> content=document.get(\"content\"),\n<25> embedding=document.get(\"embedding\"),\n<26> image_embedding=document.get(\"imageEmbedding\"),\n<27> category=document.get(\"category\"),\n<28> sourcepage=document.get(\"sourcepage\"),\n<29> sourcefile=document.get(\"sourcefile\"),\n<30> oids=document.get(\"oids\"),\n<31> groups=document.get(\"groups\"),\n<32> captions=cast(List["},"context":{"kind":"string","value":"===========below chunk 0===========\n : app.backend.approaches.approach\n class Approach(ABC):\n def search(\n self,\n top: int,\n query_text: Optional[str],\n filter: Optional[str],\n vectors: List[VectorQuery],\n use_semantic_ranker: bool,\n use_semantic_captions: bool,\n + minimum_search_score: Optional[float],\n + minimum_reranker_score: Optional[float],\n ) -> List[Document]:\n # offset: 1\n score=document.get(\"@search.score\"),\n reranker_score=document.get(\"@search.reranker_score\"),\n )\n )\n return documents\n \n \n===========unchanged ref 0===========\n at: app.backend.approaches.approach\n Document(id: Optional[str], content: Optional[str], embedding: Optional[List[float]], image_embedding: Optional[List[float]], category: Optional[str], sourcepage: Optional[str], sourcefile: Optional[str], oids: 
Optional[List[str]], groups: Optional[List[str]], captions: List[QueryCaptionResult], score: Optional[float]=None, reranker_score: Optional[float]=None)\n \n at: app.backend.approaches.approach.Approach.__init__\n self.search_client = search_client\n \n self.query_language = query_language\n \n self.query_speller = query_speller\n \n at: app.backend.approaches.approach.Document\n id: Optional[str]\n \n content: Optional[str]\n \n embedding: Optional[List[float]]\n \n image_embedding: Optional[List[float]]\n \n category: Optional[str]\n \n sourcepage: Optional[str]\n \n sourcefile: Optional[str]\n \n oids: Optional[List[str]]\n \n groups: Optional[List[str]]\n \n captions: List[QueryCaptionResult]\n \n score: Optional[float] = None\n \n reranker_score: Optional[float] = None\n \n at: typing\n cast(typ: Type[_T], val: Any) -> _T\n cast(typ: str, val: Any) -> Any\n cast(typ: object, val: Any) -> Any\n \n List = _alias(list, 1, inst=False, name='List')\n \n \n===========changed ref 0===========\n # module: tests.test_chatapproach\n + def mock_search(*args, **kwargs):\n + return MockAsyncSearchResultsIterator(kwargs.get(\"search_text\"), kwargs.get(\"vector_queries\"))\n + \n===========changed ref 1===========\n (0, 0, 1),\n + (0, 2, 1),\n + (0.03, 0, 1),\n + (0.03, 2, 1),\n + (1, 0, 0),\n + (0, 4, 0),\n + (1, 4, 0),\n + ],\n + )\n + async def test_search_results_filtering_by_scores(\n + monkeypatch, minimum_search_score, minimum_reranker_score, expected_result_count\n + ):\n + chat_approach = ChatReadRetrieveReadApproach(\n + search_client=SearchClient(endpoint=\"\", index_name=\"\", credential=AzureKeyCredential(\"\")),\n + auth_helper=None,\n + openai_client=None,\n + chatgpt_model=\"gpt-35-turbo\",\n + chatgpt_deployment=\"chat\",\n + embedding_deployment=\"embeddings\",\n + embedding_model=\"text-\",\n + sourcepage_field=\"\",\n + content_field=\"\",\n + query_language=\"en-us\",\n + query_speller=\"lexicon\",\n + )\n + \n + monkeypatch.setattr(SearchClient, \"search\", mock_search)\n + \n + filtered_results = await chat_approach.search(\n + top=10,\n + query_text=\"test query\",\n + filter=None,\n + vectors=[],\n + use_semantic_ranker=True,\n + use_semantic_captions=True,\n + minimum_search_score=minimum_search_score,\n + minimum_reranker_score=minimum_reranker_score,\n + )\n + \n + assert (\n + len(filtered_results) == expected_result_count\n + ), f\"Expected {expected_result_count} results with minimum_search_score={minimum_search_score} and minimum_reranker_score={minimum_reranker_score}\"\n + \n===========changed ref 2===========\n # module: app.backend.approaches.retrievethenreadvision\n class RetrieveThenReadVisionApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n q = messages[-1][\"content\"]\n overrides = context.get(\"overrides\", {})\n auth_claims = context.get(\"auth_claims\", {})\n has_text = overrides.get(\"retrieval_mode\") in [\"text\", \"hybrid\", None]\n has_vector = overrides.get(\"retrieval_mode\") in [\"vectors\", \"hybrid\", None]\n vector_fields = overrides.get(\"vector_fields\", [\"embedding\"])\n \n include_gtpV_text = overrides.get(\"gpt4v_input\") in [\"textAndImages\", \"texts\", None]\n include_gtpV_images = overrides.get(\"gpt4v_input\") in [\"textAndImages\", \"images\", None]\n \n use_semantic_captions = True if overrides.get(\"semantic_captions\") and 
has_text else False\n top = overrides.get(\"top\", 3)\n + minimum_search_score = overrides.get(\"minimum_search_score\", 0.0)\n + minimum_reranker_score = overrides.get(\"minimum_reranker_score\", 0.0)\n filter = self.build_filter(overrides, auth_claims)\n use_semantic_ranker = overrides.get(\"semantic_ranker\") and has_text\n \n # If retrieval mode includes vectors, compute an embedding for the query\n \n vectors = []\n if has_vector:\n for field in vector_fields:\n vector = (\n await self.compute_text_embedding(q)\n if field == \"embedding\"\n else await self.compute_image_embedding(q)\n )\n vectors.append(vector)\n \n # Only keep the text"}}},{"rowIdx":5849,"cells":{"path":{"kind":"string","value":"app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"ccf2494f3eadfae02126c2ec6b3be74d38e83618"},"commit_message":{"kind":"string","value":"Add minimum score criteria for AI search results (#1417)"},"ground_truth":{"kind":"string","value":" <9>: minimum_search_score = overrides.get(\"minimum_search_score\", 0.0)\n minimum_reranker_score = overrides.get(\"minimum_reranker_score\", 0.0)\n<18>: results = await self.search(\n top,\n query_text,\n filter,\n vectors,\n use_semantic_ranker,\n use_semantic_captions,\n minimum_search_score,\n minimum_reranker_score,\n )\n results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions)\n"},"main_code":{"kind":"string","value":" # module: app.backend.approaches.retrievethenread\n class RetrieveThenReadApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n <0> q = messages[-1][\"content\"]\n <1> overrides = context.get(\"overrides\", {})\n <2> auth_claims = context.get(\"auth_claims\", {})\n <3> has_text = overrides.get(\"retrieval_mode\") in [\"text\", \"hybrid\", None]\n <4> has_vector = overrides.get(\"retrieval_mode\") in [\"vectors\", \"hybrid\", None]\n <5> use_semantic_ranker = overrides.get(\"semantic_ranker\") and has_text\n <6> \n <7> use_semantic_captions = True if overrides.get(\"semantic_captions\") and has_text else False\n <8> top = overrides.get(\"top\", 3)\n <9> filter = self.build_filter(overrides, auth_claims)\n<10> # If retrieval mode includes vectors, compute an embedding for the query\n<11> vectors: list[VectorQuery] = []\n<12> if has_vector:\n<13> vectors.append(await self.compute_text_embedding(q))\n<14> \n<15> # Only keep the text query if the retrieval mode uses text, otherwise drop it\n<16> query_text = q if has_text else None\n<17> \n<18> results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions)\n<19> \n<20> user_content = [q]\n<21> \n<22> template = overrides.get(\"prompt_template\", self.system_chat_template)\n<23> model = self.chatgpt_model\n<24> message_builder = MessageBuilder(template, model)\n<25> \n<26> # Process results\n<27> sources_content = self.get_sources_content(results, use_semantic_captions, use_image_"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: app.backend.approaches.retrievethenread\n class RetrieveThenReadApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not 
used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n # offset: 1\n \n # Append user message\n content = \"\\n\".join(sources_content)\n user_content = q + \"\\n\" + f\"Sources:\\n {content}\"\n message_builder.insert_message(\"user\", user_content)\n message_builder.insert_message(\"assistant\", self.answer)\n message_builder.insert_message(\"user\", self.question)\n updated_messages = message_builder.messages\n chat_completion = (\n await self.openai_client.chat.completions.create(\n # Azure OpenAI takes the deployment name as the model name\n model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,\n messages=updated_messages,\n temperature=overrides.get(\"temperature\", 0.3),\n max_tokens=1024,\n n=1,\n )\n ).model_dump()\n \n data_points = {\"text\": sources_content}\n extra_info = {\n \"data_points\": data_points,\n \"thoughts\": [\n ThoughtStep(\n \"Search using user query\",\n query_text,\n {\n \"use_semantic_captions\": use_semantic_captions,\n \"use_semantic_ranker\": use_semantic_ranker,\n \"top\": top,\n \"filter\": filter,\n \"has_vector\": has_vector,\n },\n ),\n ThoughtStep(\n \"Search results\",\n [result.serialize_for_results() for result in results],\n ),\n ThoughtStep(\n \"Prompt to generate answer\",\n [\n===========below chunk 1===========\n # module: app.backend.approaches.retrievethenread\n class RetrieveThenReadApproach(Approach):\n def run(\n self,\n messages: list[dict],\n stream: bool = False, # Stream is not used in this approach\n session_state: Any = None,\n context: dict[str, Any] = {},\n ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:\n # offset: 2\n _results() for result in results],\n ),\n ThoughtStep(\n \"Prompt to generate answer\",\n [str(message) for message in updated_messages],\n (\n {\"model\": self.chatgpt_model, \"deployment\": self.chatgpt_deployment}\n if self.chatgpt_deployment\n else {\"model\": self.chatgpt_model}\n ),\n ),\n ],\n }\n \n chat_completion[\"choices\"][0][\"context\"] = extra_info\n chat_completion[\"choices\"][0][\"session_state\"] = session_state\n return chat_completion\n \n \n===========unchanged ref 0===========\n at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach\n system_chat_template = (\n \"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. \"\n + \"Use 'you' to refer to the individual asking the questions even if they ask with 'I'. \"\n + \"Answer the following question using only the data provided in the sources below. \"\n + \"For tabular information return it as an html table. Do not return markdown format. \"\n + \"Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. \"\n + \"If you cannot answer using the sources below, say you don't know. Use below example to answer\"\n )\n \n question = \"\"\"\n 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?'\n \n Sources:\n info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. 
Out-of-network deductibles are $1000 for employee and $2000 for family.\n info2.pdf: Overlake is in-network for the employee plan.\n info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue.\n info4.pdf: In-network institutions include Overlake, Swedish and others in the region\n \"\"\"\n \n answer = \"In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf].\"\n \n at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__\n self.chatgpt_deployment = chatgpt_deployment\n \n self.openai_client = openai_client\n \n self.chatgpt_model = chatgpt_model\n \n \n===========unchanged ref 1===========\n at: approaches.approach\n ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None)\n \n at: approaches.approach.Approach\n build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]\n \n search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document]\n \n get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str]\n \n compute_text_embedding(q: str)\n \n run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]\n \n at: approaches.approach.Document\n id: Optional[str]\n \n content: Optional[str]\n \n embedding: Optional[List[float]]\n \n image_embedding: Optional[List[float]]\n \n category: Optional[str]\n \n sourcepage: Optional[str]\n \n sourcefile: Optional[str]\n \n oids: Optional[List[str]]\n \n groups: Optional[List[str]]\n \n captions: List[QueryCaptionResult]\n \n score: Optional[float] = None\n \n reranker_score: Optional[float] = None\n \n serialize_for_results() -> dict[str, Any]\n \n at: core.messagebuilder\n MessageBuilder(system_content: str, chatgpt_model: str)\n \n at: core.messagebuilder.MessageBuilder\n insert_message(role: str, content: Union[str, List[ChatCompletionContentPartParam]], index: int=1)\n \n \n===========unchanged ref 2===========\n at: core.messagebuilder.MessageBuilder.__init__\n self.messages: list[ChatCompletionMessageParam] = [\n ChatCompletionSystemMessageParam(role=\"system\", content=unicodedata.normalize(\"NFC\", system_content))\n ]\n \n at: typing\n AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\n \n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n get(key: _KT) -> Optional[_VT_co]\n \n \n===========changed ref 0===========\n # module: tests.test_chatapproach\n + def mock_search(*args, **kwargs):\n + return MockAsyncSearchResultsIterator(kwargs.get(\"search_text\"), kwargs.get(\"vector_queries\"))\n + "}}},{"rowIdx":5850,"cells":{"path":{"kind":"string","value":"tests.test_app_config/test_app_visionkey_notfound"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"6aa14285543e923de55a1c639ac5b254aa1d69fd"},"commit_message":{"kind":"string","value":"Fix search env var (#1455)"},"ground_truth":{"kind":"string","value":" <2>: monkeypatch.setenv(\"AZURE_SEARCH_SECRET_NAME\", \"search-secret-name\")\n monkeypatch.setenv(\"SEARCH_SECRET_NAME\", 
\"search-secret-name\")\n"},"main_code":{"kind":"string","value":" # module: tests.test_app_config\n @pytest.mark.asyncio\n async def test_app_visionkey_notfound(monkeypatch, minimal_env):\n <0> monkeypatch.setenv(\"AZURE_KEY_VAULT_NAME\", \"my_key_vault\")\n <1> monkeypatch.setenv(\"VISION_SECRET_NAME\", \"\")\n <2> monkeypatch.setenv(\"SEARCH_SECRET_NAME\", \"search-secret-name\")\n <3> \n <4> async def get_secret(*args, **kwargs):\n <5> if args[1] == \"vision-secret-name\":\n <6> raise Exception(\"Key not found\")\n <7> return MockKeyVaultSecret(\"mysecret\")\n <8> \n <9> monkeypatch.setattr(SecretClient, \"get_secret\", get_secret)\n<10> \n<11> quart_app = app.create_app()\n<12> async with quart_app.test_app() as test_app:\n<13> test_app.test_client()\n<14> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.monkeypatch\n monkeypatch() -> Generator[\"MonkeyPatch\", None, None]\n \n at: tests.mocks\n MockKeyVaultSecret(value)\n \n "}}},{"rowIdx":5851,"cells":{"path":{"kind":"string","value":"tests.test_app_config/test_app_searchkey_notfound"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"6aa14285543e923de55a1c639ac5b254aa1d69fd"},"commit_message":{"kind":"string","value":"Fix search env var (#1455)"},"ground_truth":{"kind":"string","value":" <2>: monkeypatch.setenv(\"AZURE_SEARCH_SECRET_NAME\", \"\")\n monkeypatch.setenv(\"SEARCH_SECRET_NAME\", \"\")\n"},"main_code":{"kind":"string","value":" # module: tests.test_app_config\n @pytest.mark.asyncio\n async def test_app_searchkey_notfound(monkeypatch, minimal_env):\n <0> monkeypatch.setenv(\"AZURE_KEY_VAULT_NAME\", \"my_key_vault\")\n <1> monkeypatch.setenv(\"VISION_SECRET_NAME\", \"vision-secret-name\")\n <2> monkeypatch.setenv(\"SEARCH_SECRET_NAME\", \"\")\n <3> \n <4> async def get_secret(*args, **kwargs):\n <5> if args[1] == \"search-secret-name\":\n <6> raise Exception(\"Key not found\")\n <7> return MockKeyVaultSecret(\"mysecret\")\n <8> \n <9> monkeypatch.setattr(SecretClient, \"get_secret\", get_secret)\n<10> \n<11> quart_app = app.create_app()\n<12> async with quart_app.test_app() as test_app:\n<13> test_app.test_client()\n<14> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: tests.mocks\n MockKeyVaultSecret(value)\n \n \n===========changed ref 0===========\n # module: tests.test_app_config\n @pytest.mark.asyncio\n async def test_app_visionkey_notfound(monkeypatch, minimal_env):\n monkeypatch.setenv(\"AZURE_KEY_VAULT_NAME\", \"my_key_vault\")\n monkeypatch.setenv(\"VISION_SECRET_NAME\", \"\")\n + monkeypatch.setenv(\"AZURE_SEARCH_SECRET_NAME\", \"search-secret-name\")\n - monkeypatch.setenv(\"SEARCH_SECRET_NAME\", \"search-secret-name\")\n \n async def get_secret(*args, **kwargs):\n if args[1] == \"vision-secret-name\":\n raise Exception(\"Key not found\")\n return MockKeyVaultSecret(\"mysecret\")\n \n monkeypatch.setattr(SecretClient, \"get_secret\", get_secret)\n \n quart_app = app.create_app()\n async with quart_app.test_app() as test_app:\n test_app.test_client()\n 
"}}},{"rowIdx":5852,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/OpenAIEmbeddings.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <1>: self.open_ai_dimensions = open_ai_dimensions\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n <0> self.open_ai_model_name = open_ai_model_name\n <1> self.disable_batch = disable_batch\n <2> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 2===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5853,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_batch"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":"<11>: emb_response = await client.embeddings.create(\n model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n )\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n <0> batches = self.split_text_into_batches(texts)\n <1> embeddings = []\n <2> client = await self.create_client()\n <3> for batch in batches:\n <4> async for attempt in AsyncRetrying(\n <5> retry=retry_if_exception_type(RateLimitError),\n <6> wait=wait_random_exponential(min=15, max=60),\n <7> stop=stop_after_attempt(15),\n <8> 
before_sleep=self.before_retry_sleep,\n <9> ):\n<10> with attempt:\n<11> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n<12> embeddings.extend([data.embedding for data in emb_response.data])\n<13> logger.info(\n<14> \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n<15> len(batch.texts),\n<16> batch.token_length,\n<17> )\n<18> \n<19> return embeddings\n<20> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 3===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5854,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_single"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <8>: emb_response = await client.embeddings.create(\n model=self.open_ai_model_name, input=text, **dimensions_args\n )\n emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n <0> client = await self.create_client()\n <1> async for attempt in AsyncRetrying(\n <2> retry=retry_if_exception_type(RateLimitError),\n <3> wait=wait_random_exponential(min=15, max=60),\n <4> stop=stop_after_attempt(15),\n <5> before_sleep=self.before_retry_sleep,\n <6> ):\n <7> with attempt:\n <8> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n <9> logger.info(\"Computed embedding for text 
section. Character count: %d\", len(text))\n<10> \n<11> return emb_response.data[0].embedding\n<12> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n logger.info(\n \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n len(batch.texts),\n batch.token_length,\n )\n \n return embeddings\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 4===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5855,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embeddings"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <0>: dimensions_args: ExtraArgs = (\n {\"dimensions\": self.open_ai_dimensions}\n if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n 
else {}\n )\n \n <1>: return await self.create_embedding_batch(texts, dimensions_args)\n return await self.create_embedding_batch(texts)\n <3>: return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n return [await self.create_embedding_single(text) for text in texts]\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n <0> if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n <1> return await self.create_embedding_batch(texts)\n <2> \n <3> return [await self.create_embedding_single(text) for text in texts]\n <4> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n logger.info(\n \"Computed embeddings in batch. 
Batch size: %d, Token count: %d\",\n len(batch.texts),\n batch.token_length,\n )\n \n return embeddings\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 5===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5856,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/AzureOpenAIEmbeddingService.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <0>: super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n super().__init__(open_ai_model_name, disable_batch)\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n <0> super().__init__(open_ai_model_name, disable_batch)\n <1> self.open_ai_service = open_ai_service\n <2> self.open_ai_deployment = open_ai_deployment\n <3> self.credential = credential\n <4> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await 
self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n logger.info(\n \"Computed embeddings in batch. 
Batch size: %d, Token count: %d\",\n len(batch.texts),\n batch.token_length,\n )\n \n return embeddings\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 6===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5857,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.embeddings/OpenAIEmbeddingService.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <0>: super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n super().__init__(open_ai_model_name, disable_batch)\n"},"main_code":{"kind":"string","value":" scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n <0> super().__init__(open_ai_model_name, disable_batch)\n <1> self.credential = credential\n <2> self.organization = organization\n <3> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = 
open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n logger.info(\n \"Computed embeddings in batch. 
Batch size: %d, Token count: %d\",\n len(batch.texts),\n batch.token_length,\n )\n \n return embeddings\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 7===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5858,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.searchmanager/SearchManager.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <5>: # Integrated vectorization uses the ada-002 model with 1536 dimensions\n self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n <0> self.search_info = search_info\n <1> self.search_analyzer_name = search_analyzer_name\n <2> self.use_acls = use_acls\n <3> self.use_int_vectorization = use_int_vectorization\n <4> self.embeddings = embeddings\n <5> self.search_images = search_images\n <6> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: scripts.prepdocslib.embeddings\n OpenAIEmbeddings(open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool=False)\n \n at: scripts.prepdocslib.strategy\n SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + 
\"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 3===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 4===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 7===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + 
model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 8===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:\n - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:\n batches = self.split_text_into_batches(texts)\n embeddings = []\n client = await self.create_client()\n for batch in batches:\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=batch.texts, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)\n embeddings.extend([data.embedding for data in emb_response.data])\n logger.info(\n \"Computed embeddings in batch. Batch size: %d, Token count: %d\",\n len(batch.texts),\n batch.token_length,\n )\n \n return embeddings\n "}}},{"rowIdx":5859,"cells":{"path":{"kind":"string","value":"scripts.prepdocslib.searchmanager/SearchManager.create_index"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":"<30>: vector_search_dimensions=self.embedding_dimensions,\n vector_search_dimensions=1536,\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n <0> logger.info(\"Ensuring search index %s exists\", self.search_info.index_name)\n <1> \n <2> async with self.search_info.create_search_index_client() as search_index_client:\n <3> fields = [\n <4> (\n <5> SimpleField(name=\"id\", type=\"Edm.String\", key=True)\n <6> if not self.use_int_vectorization\n <7> else SearchField(\n <8> name=\"id\",\n <9> type=\"Edm.String\",\n<10> key=True,\n<11> sortable=True,\n<12> filterable=True,\n<13> facetable=True,\n<14> analyzer_name=\"keyword\",\n<15> )\n<16> ),\n<17> SearchableField(\n<18> name=\"content\",\n<19> type=\"Edm.String\",\n<20> analyzer_name=self.search_analyzer_name,\n<21> ),\n<22> SearchField(\n<23> name=\"embedding\",\n<24> type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n<25> hidden=False,\n<26> searchable=True,\n<27> filterable=False,\n<28> sortable=False,\n<29> facetable=False,\n<30> vector_search_dimensions=1536,\n<31> vector_search_profile_name=\"embedding_config\",\n<32> ),\n<33> SimpleField(name=\"category\", type=\"Edm.String\", filterable=True, facetable=True),\n<34> SimpleField(\n<35> name=\"sourcepage\",\n<36> type=\"Edm.String\",\n<37> filterable=True,\n<38> facetable=True,\n<39> ),\n<40> SimpleField(\n<41> name=\"sourcefile\",\n<42> type=\"Edm.String\",\n<43> filterable=True,\n<44> facetable=True,\n<45> ),\n<46> ]\n<47> if self.use_acls:\n<48> fields.append(\n<49> 
SimpleField(\n<50> name=\"oids\",\n<51> type=SearchField"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 1\n filterable=True,\n )\n )\n fields.append(\n SimpleField(\n name=\"groups\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.String),\n filterable=True,\n )\n )\n if self.use_int_vectorization:\n fields.append(SearchableField(name=\"parent_id\", type=\"Edm.String\", filterable=True))\n if self.search_images:\n fields.append(\n SearchField(\n name=\"imageEmbedding\",\n type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n hidden=False,\n searchable=True,\n filterable=False,\n sortable=False,\n facetable=False,\n vector_search_dimensions=1024,\n vector_search_profile_name=\"embedding_config\",\n ),\n )\n \n index = SearchIndex(\n name=self.search_info.index_name,\n fields=fields,\n semantic_search=SemanticSearch(\n configurations=[\n SemanticConfiguration(\n name=\"default\",\n prioritized_fields=SemanticPrioritizedFields(\n title_field=None, content_fields=[SemanticField(field_name=\"content\")]\n ),\n )\n ]\n ),\n vector_search=VectorSearch(\n algorithms=[\n HnswAlgorithmConfiguration(\n name=\"hnsw_config\",\n parameters=HnswParameters(metric=\"cosine\"),\n )\n ],\n profiles=[\n VectorSearchProfile(\n name=\"embedding_config\",\n algorithm_configuration_name=\"hnsw_config\",\n vectorizer=(\n f\"{self.search_info.index_name}-vectorizer\" if self.use_int_vectorization else None\n ),\n ),\n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_\n===========below chunk 1===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):\n # offset: 2\n \n ],\n vectorizers=vectorizers,\n ),\n )\n if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:\n logger.info(\"Creating %s search index\", self.search_info.index_name)\n await search_index_client.create_index(index)\n else:\n logger.info(\"Search index %s already exists\", self.search_info.index_name)\n \n \n===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: scripts.prepdocslib.searchmanager\n logger = logging.getLogger(\"ingester\")\n \n at: scripts.prepdocslib.searchmanager.SearchManager.__init__\n self.search_info = search_info\n \n self.search_analyzer_name = search_analyzer_name\n \n self.use_acls = use_acls\n \n self.use_int_vectorization = use_int_vectorization\n \n self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n \n at: scripts.prepdocslib.strategy.SearchInfo\n create_search_index_client() -> SearchIndexClient\n \n at: scripts.prepdocslib.strategy.SearchInfo.__init__\n self.index_name = index_name\n \n at: typing\n List = _alias(list, 1, inst=False, name='List')\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n self.search_info = search_info\n 
self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.use_int_vectorization = use_int_vectorization\n self.embeddings = embeddings\n + # Integrated vectorization uses the ada-002 model with 1536 dimensions\n + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n self.search_images = search_images\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 2===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 3===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 4===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n "}}},{"rowIdx":5860,"cells":{"path":{"kind":"string","value":"tests.test_chatvisionapproach/chat_approach"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":"<18>: embedding_model=MOCK_EMBEDDING_MODEL_NAME,\n embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n embedding_model=\"text-\",\n"},"main_code":{"kind":"string","value":" # module: tests.test_chatvisionapproach\n @pytest.fixture\n def chat_approach(openai_client, mock_confidential_client_success):\n <0> return ChatReadRetrieveReadVisionApproach(\n <1> search_client=None,\n <2> openai_client=openai_client,\n <3> auth_helper=AuthenticationHelper(\n <4> search_index=MockSearchIndex,\n <5> use_authentication=True,\n <6> server_app_id=\"SERVER_APP\",\n <7> server_app_secret=\"SERVER_SECRET\",\n <8> client_app_id=\"CLIENT_APP\",\n <9> 
tenant_id=\"TENANT_ID\",\n<10> require_access_control=None,\n<11> ),\n<12> blob_container_client=None,\n<13> vision_endpoint=\"endpoint\",\n<14> vision_token_provider=lambda: \"token\",\n<15> gpt4v_deployment=\"gpt-4v\",\n<16> gpt4v_model=\"gpt-4v\",\n<17> embedding_deployment=\"embeddings\",\n<18> embedding_model=\"text-\",\n<19> sourcepage_field=\"\",\n<20> content_field=\"\",\n<21> query_language=\"en-us\",\n<22> query_speller=\"lexicon\",\n<23> )\n<24> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: _pytest.fixtures\n fixture(fixture_function: FixtureFunction, *, scope: \"Union[_ScopeName, Callable[[str, Config], _ScopeName]]\"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[\n Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]\n ]=..., name: Optional[str]=...) -> FixtureFunction\n fixture(fixture_function: None=..., *, scope: \"Union[_ScopeName, Callable[[str, Config], _ScopeName]]\"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[\n Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]\n ]=..., name: Optional[str]=None) -> FixtureFunctionMarker\n \n at: tests.conftest\n mock_confidential_client_success(monkeypatch)\n \n at: tests.mocks\n MOCK_EMBEDDING_DIMENSIONS = 1536\n \n MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n \n at: tests.test_chatvisionapproach\n MockSearchIndex = SearchIndex(\n name=\"test\",\n fields=[\n SearchField(name=\"oids\", type=\"Collection(Edm.String)\"),\n SearchField(name=\"groups\", type=\"Collection(Edm.String)\"),\n ],\n )\n \n openai_client()\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 3===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - 
super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n self.search_info = search_info\n self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.use_int_vectorization = use_int_vectorization\n self.embeddings = embeddings\n + # Integrated vectorization uses the ada-002 model with 1536 dimensions\n + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n self.search_images = search_images\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 7===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. 
Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 8===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n "}}},{"rowIdx":5861,"cells":{"path":{"kind":"string","value":"scripts.prepdocs/setup_embeddings_service"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":"<12>: open_ai_dimensions=openai_dimensions,\n<20>: open_ai_dimensions=openai_dimensions,\n"},"main_code":{"kind":"string","value":" # module: scripts.prepdocs\n def setup_embeddings_service(\n azure_credential: AsyncTokenCredential,\n openai_host: str,\n openai_model_name: str,\n openai_service: str,\n openai_deployment: str,\n + openai_dimensions: int,\n openai_key: Union[str, None],\n openai_org: Union[str, None],\n disable_vectors: bool = False,\n disable_batch_vectors: bool = False,\n ):\n <0> if disable_vectors:\n <1> logger.info(\"Not setting up embeddings service\")\n <2> return None\n <3> \n <4> if openai_host != \"openai\":\n <5> azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (\n <6> azure_credential if openai_key is None else AzureKeyCredential(openai_key)\n <7> )\n <8> return AzureOpenAIEmbeddingService(\n <9> open_ai_service=openai_service,\n<10> open_ai_deployment=openai_deployment,\n<11> open_ai_model_name=openai_model_name,\n<12> credential=azure_open_ai_credential,\n<13> disable_batch=disable_batch_vectors,\n<14> )\n<15> else:\n<16> if openai_key is None:\n<17> raise ValueError(\"OpenAI key is required when using the non-Azure OpenAI API\")\n<18> return OpenAIEmbeddingService(\n<19> open_ai_model_name=openai_model_name,\n<20> credential=openai_key,\n<21> organization=openai_org,\n<22> disable_batch=disable_batch_vectors,\n<23> )\n<24> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: logging.Logger\n info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: prepdocslib.embeddings\n AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False)\n \n OpenAIEmbeddingService(open_ai_model_name: str, open_ai_dimensions: int, credential: str, organization: Optional[str]=None, disable_batch: bool=False)\n \n at: scripts.prepdocs\n logger = logging.getLogger(\"ingester\")\n \n \n===========changed ref 0===========\n # module: 
scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 3===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n \n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n self.search_info = search_info\n self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.use_int_vectorization = use_int_vectorization\n self.embeddings = embeddings\n + # Integrated vectorization uses the ada-002 model with 1536 dimensions\n + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n self.search_images = search_images\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 
7===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 8===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n "}}},{"rowIdx":5862,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_success"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":"<26>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-ada-003\",\n<42>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-ada-003\",\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n <0> async def mock_create_client(*args, **kwargs):\n <1> # From https://platform.openai.com/docs/api-reference/embeddings/create\n <2> return MockClient(\n <3> embeddings_client=MockEmbeddingsClient(\n <4> create_embedding_response=openai.types.CreateEmbeddingResponse(\n <5> object=\"list\",\n <6> data=[\n <7> openai.types.Embedding(\n <8> embedding=[\n <9> 0.0023064255,\n<10> -0.009327292,\n<11> -0.0028842222,\n<12> ],\n<13> index=0,\n<14> object=\"embedding\",\n<15> )\n<16> ],\n<17> model=\"text-embedding-ada-002\",\n<18> usage=Usage(prompt_tokens=8, total_tokens=8),\n<19> )\n<20> )\n<21> )\n<22> \n<23> embeddings = AzureOpenAIEmbeddingService(\n<24> open_ai_service=\"x\",\n<25> open_ai_deployment=\"x\",\n<26> open_ai_model_name=\"text-ada-003\",\n<27> credential=MockAzureCredential(),\n<28> disable_batch=False,\n<29> )\n<30> monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n<31> assert await 
embeddings.create_embeddings(texts=[\"foo\"]) == [\n<32> [\n<33> 0.0023064255,\n<34> -0.009327292,\n<35> -0.0028842222,\n<36> ]\n<37> ]\n<38> \n<39> embeddings = AzureOpenAIEmbeddingService(\n<40> open_ai_service=\"x\",\n<41> open_ai_deployment=\"x\",\n<42> open_ai_model_name=\"text-ada-003\",\n<43> credential=MockAzureCredential(),\n<44> disable_batch=True,\n<45> )\n<46> monkeypatch.setattr(embeddings, \"create_client\","},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n # offset: 1\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n open_ai_model_name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=False\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n open_ai_model_name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=True\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 3===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n 
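A minimal, self-contained sketch of the `dimensions` plumbing the diffs above introduce (names mirror the diff; the standalone helper build_dimensions_args and the assert examples are illustrative additions, not code from the commit):

# Python 3.8+ (TypedDict ships in typing from 3.8 onward)
from typing import TypedDict


class ExtraArgs(TypedDict, total=False):
    dimensions: int


# Mirrors SUPPORTED_DIMENSIONS_MODEL from the diff: ada-002 has a fixed
# 1536-dimension output, while the text-embedding-3-* models accept an override.
SUPPORTED_DIMENSIONS_MODEL = {
    "text-embedding-ada-002": False,
    "text-embedding-3-small": True,
    "text-embedding-3-large": True,
}


def build_dimensions_args(model_name: str, dimensions: int) -> ExtraArgs:
    # Only pass `dimensions` through for models that support it; for ada-002
    # (and unknown models) an empty dict is returned and nothing extra is sent.
    if SUPPORTED_DIMENSIONS_MODEL.get(model_name):
        return {"dimensions": dimensions}
    return {}


assert build_dimensions_args("text-embedding-ada-002", 1536) == {}
assert build_dimensions_args("text-embedding-3-small", 256) == {"dimensions": 256}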
\n===========changed ref 5===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n self.search_info = search_info\n self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.use_int_vectorization = use_int_vectorization\n self.embeddings = embeddings\n + # Integrated vectorization uses the ada-002 model with 1536 dimensions\n + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n self.search_images = search_images\n \n===========changed ref 6===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n def create_embeddings(self, texts: List[str]) -> List[List[float]]:\n + dimensions_args: ExtraArgs = (\n + {\"dimensions\": self.open_ai_dimensions}\n + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)\n + else {}\n + )\n + \n if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:\n + return await self.create_embedding_batch(texts, dimensions_args)\n - return await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n \n===========changed ref 7===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:\n - def create_embedding_single(self, text: str) -> List[float]:\n client = await self.create_client()\n async for attempt in AsyncRetrying(\n retry=retry_if_exception_type(RateLimitError),\n wait=wait_random_exponential(min=15, max=60),\n stop=stop_after_attempt(15),\n before_sleep=self.before_retry_sleep,\n ):\n with attempt:\n + emb_response = await client.embeddings.create(\n + model=self.open_ai_model_name, input=text, **dimensions_args\n + )\n - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)\n logger.info(\"Computed embedding for text section. 
Character count: %d\", len(text))\n \n return emb_response.data[0].embedding\n \n===========changed ref 8===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n \"\"\"\n Contains common logic across both OpenAI and Azure OpenAI embedding services\n Can split source text into batches for more efficient embedding calls\n \"\"\"\n \n + SUPPORTED_BATCH_AOAI_MODEL = {\n + \"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n - SUPPORTED_BATCH_AOAI_MODEL = {\"text-embedding-ada-002\": {\"token_limit\": 8100, \"max_batch_size\": 16}}\n + \"text-embedding-3-small\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + \"text-embedding-3-large\": {\"token_limit\": 8100, \"max_batch_size\": 16},\n + }\n + SUPPORTED_DIMENSIONS_MODEL = {\n + \"text-embedding-ada-002\": False,\n + \"text-embedding-3-small\": True,\n + \"text-embedding-3-large\": True,\n + }\n "}}},{"rowIdx":5863,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <6>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-embedding-ada-002\",\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):\n <0> with caplog.at_level(logging.INFO):\n <1> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <2> with pytest.raises(tenacity.RetryError):\n <3> embeddings = AzureOpenAIEmbeddingService(\n <4> open_ai_service=\"x\",\n <5> open_ai_deployment=\"x\",\n <6> open_ai_model_name=\"text-embedding-ada-002\",\n <7> credential=MockAzureCredential(),\n <8> disable_batch=False,\n <9> )\n<10> monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11> await embeddings.create_embeddings(texts=[\"foo\"])\n<12> assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n<13> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n async def mock_create_client(*args, **kwargs):\n # From https://platform.openai.com/docs/api-reference/embeddings/create\n return MockClient(\n embeddings_client=MockEmbeddingsClient(\n create_embedding_response=openai.types.CreateEmbeddingResponse(\n object=\"list\",\n data=[\n openai.types.Embedding(\n embedding=[\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ],\n index=0,\n object=\"embedding\",\n )\n ],\n model=\"text-embedding-ada-002\",\n usage=Usage(prompt_tokens=8, total_tokens=8),\n )\n )\n )\n \n embeddings = AzureOpenAIEmbeddingService(\n open_ai_service=\"x\",\n open_ai_deployment=\"x\",\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_model_name=\"text-ada-003\",\n credential=MockAzureCredential(),\n disable_batch=False,\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n 
-0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = AzureOpenAIEmbeddingService(\n open_ai_service=\"x\",\n open_ai_deployment=\"x\",\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_\n===========changed ref 1===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n # offset: 1\n \n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_model_name=\"text-ada-003\",\n credential=MockAzureCredential(),\n disable_batch=True,\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n + credential=MockAzureCredential(),\n + organization=\"org\",\n + disable_batch=False,\n - open_ai_model_name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=False\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n + credential=MockAzureCredential(),\n + organization=\"org\",\n + disable_batch=True,\n - open_ai_\n===========changed ref 2===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n # offset: 2\n name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=True\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n===========changed ref 3===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 4===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 5===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 6===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n \n===========changed ref 7===========\n # module: 
scripts.prepdocslib.embeddings\n class AzureOpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n self,\n open_ai_service: str,\n open_ai_deployment: str,\n open_ai_model_name: str,\n + open_ai_dimensions: int,\n credential: Union[AsyncTokenCredential, AzureKeyCredential],\n disable_batch: bool = False,\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.open_ai_service = open_ai_service\n self.open_ai_deployment = open_ai_deployment\n self.credential = credential\n \n===========changed ref 8===========\n # module: scripts.prepdocslib.searchmanager\n class SearchManager:\n def __init__(\n self,\n search_info: SearchInfo,\n search_analyzer_name: Optional[str] = None,\n use_acls: bool = False,\n use_int_vectorization: bool = False,\n embeddings: Optional[OpenAIEmbeddings] = None,\n search_images: bool = False,\n ):\n self.search_info = search_info\n self.search_analyzer_name = search_analyzer_name\n self.use_acls = use_acls\n self.use_int_vectorization = use_int_vectorization\n self.embeddings = embeddings\n + # Integrated vectorization uses the ada-002 model with 1536 dimensions\n + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536\n self.search_images = search_images\n "}}},{"rowIdx":5864,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_ratelimiterror_single"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <6>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-embedding-ada-002\",\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):\n <0> with caplog.at_level(logging.INFO):\n <1> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <2> with pytest.raises(tenacity.RetryError):\n <3> embeddings = AzureOpenAIEmbeddingService(\n <4> open_ai_service=\"x\",\n <5> open_ai_deployment=\"x\",\n <6> open_ai_model_name=\"text-embedding-ada-002\",\n <7> credential=MockAzureCredential(),\n <8> disable_batch=True,\n <9> )\n<10> monkeypatch.setattr(embeddings, \"create_client\", create_rate_limit_client)\n<11> await embeddings.create_embeddings(texts=[\"foo\"])\n<12> assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n<13> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):\n with caplog.at_level(logging.INFO):\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n with pytest.raises(tenacity.RetryError):\n embeddings = AzureOpenAIEmbeddingService(\n open_ai_service=\"x\",\n open_ai_deployment=\"x\",\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_model_name=\"text-embedding-ada-002\",\n credential=MockAzureCredential(),\n disable_batch=False,\n )\n monkeypatch.setattr(embeddings, \"create_client\", 
create_rate_limit_client)\n await embeddings.create_embeddings(texts=[\"foo\"])\n assert caplog.text.count(\"Rate limited on the OpenAI embeddings API\") == 14\n \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 2===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n async def mock_create_client(*args, **kwargs):\n # From https://platform.openai.com/docs/api-reference/embeddings/create\n return MockClient(\n embeddings_client=MockEmbeddingsClient(\n create_embedding_response=openai.types.CreateEmbeddingResponse(\n object=\"list\",\n data=[\n openai.types.Embedding(\n embedding=[\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ],\n index=0,\n object=\"embedding\",\n )\n ],\n model=\"text-embedding-ada-002\",\n usage=Usage(prompt_tokens=8, total_tokens=8),\n )\n )\n )\n \n embeddings = AzureOpenAIEmbeddingService(\n open_ai_service=\"x\",\n open_ai_deployment=\"x\",\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_model_name=\"text-ada-003\",\n credential=MockAzureCredential(),\n disable_batch=False,\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = AzureOpenAIEmbeddingService(\n open_ai_service=\"x\",\n open_ai_deployment=\"x\",\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_\n===========changed ref 3===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n # offset: 1\n \n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - open_ai_model_name=\"text-ada-003\",\n credential=MockAzureCredential(),\n disable_batch=True,\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n + credential=MockAzureCredential(),\n + organization=\"org\",\n + disable_batch=False,\n - open_ai_model_name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=False\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n embeddings = OpenAIEmbeddingService(\n + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n + credential=MockAzureCredential(),\n + organization=\"org\",\n + disable_batch=True,\n - open_ai_\n===========changed ref 4===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_success(monkeypatch):\n # offset: 2\n name=\"text-ada-003\", credential=MockAzureCredential(), organization=\"org\", disable_batch=True\n )\n monkeypatch.setattr(embeddings, \"create_client\", mock_create_client)\n assert await embeddings.create_embeddings(texts=[\"foo\"]) == [\n [\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ]\n ]\n \n===========changed ref 5===========\n # module: 
scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 6===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 7===========\n # module: tests.mocks\n + MOCK_EMBEDDING_DIMENSIONS = 1536\n + MOCK_EMBEDDING_MODEL_NAME = \"text-embedding-ada-002\"\n + \n MockToken = namedtuple(\"MockToken\", [\"token\", \"expires_on\", \"value\"])\n "}}},{"rowIdx":5865,"cells":{"path":{"kind":"string","value":"tests.test_prepdocs/test_compute_embedding_autherror"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <5>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-embedding-ada-002\",\n<16>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,\n open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n open_ai_model_name=\"text-embedding-ada-002\",\n"},"main_code":{"kind":"string","value":" # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_autherror(monkeypatch, capsys):\n <0> monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n <1> with pytest.raises(openai.AuthenticationError):\n <2> embeddings = AzureOpenAIEmbeddingService(\n <3> open_ai_service=\"x\",\n <4> open_ai_deployment=\"x\",\n <5> open_ai_model_name=\"text-embedding-ada-002\",\n <6> credential=MockAzureCredential(),\n <7> disable_batch=False,\n <8> )\n <9> monkeypatch.setattr(embeddings, \"create_client\", create_auth_error_limit_client)\n<10> await embeddings.create_embeddings(texts=[\"foo\"])\n<11> \n<12> with pytest.raises(openai.AuthenticationError):\n<13> embeddings = AzureOpenAIEmbeddingService(\n<14> open_ai_service=\"x\",\n<15> open_ai_deployment=\"x\",\n<16> open_ai_model_name=\"text-embedding-ada-002\",\n<17> credential=MockAzureCredential(),\n<18> disable_batch=True,\n<19> )\n<20> monkeypatch.setattr(embeddings, \"create_client\", create_auth_error_limit_client)\n<21> await embeddings.create_embeddings(texts=[\"foo\"])\n<22> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: tests.test_prepdocs\n @pytest.mark.asyncio\n async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):\n with caplog.at_level(logging.INFO):\n monkeypatch.setattr(tenacity.wait_random_exponential, \"__call__\", lambda x, y: 0)\n with pytest.raises(tenacity.RetryError):\n embeddings = 
context:
===========changed ref 0===========
 # module: tests.test_prepdocs
 @pytest.mark.asyncio
 async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
     with caplog.at_level(logging.INFO):
         monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
         with pytest.raises(tenacity.RetryError):
             embeddings = AzureOpenAIEmbeddingService(
                 open_ai_service="x",
                 open_ai_deployment="x",
+                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
+                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
-                open_ai_model_name="text-embedding-ada-002",
                 credential=MockAzureCredential(),
                 disable_batch=True,
             )
             monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
             await embeddings.create_embeddings(texts=["foo"])
     assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 1===========
 (test_compute_embedding_ratelimiterror_batch: same diff as changed ref 0, with disable_batch=False)

===========changed ref 2===========
 # module: scripts.prepdocslib.embeddings
+ class ExtraArgs(TypedDict, total=False):
+     dimensions: int

===========changed ref 3===========
 (OpenAIEmbeddings.__init__ diff, as shown above)

===========changed refs 4-6===========
 # module: tests.test_prepdocs
 @pytest.mark.asyncio
 async def test_compute_embedding_success(monkeypatch):
     async def mock_create_client(*args, **kwargs):
         # From https://platform.openai.com/docs/api-reference/embeddings/create
         return MockClient(
             embeddings_client=MockEmbeddingsClient(
                 create_embedding_response=openai.types.CreateEmbeddingResponse(
                     object="list",
                     data=[
                         openai.types.Embedding(
                             embedding=[0.0023064255, -0.009327292, -0.0028842222],
                             index=0,
                             object="embedding",
                         )
                     ],
                     model="text-embedding-ada-002",
                     usage=Usage(prompt_tokens=8, total_tokens=8),
                 )
             )
         )

     # Four near-identical blocks follow (AzureOpenAIEmbeddingService and OpenAIEmbeddingService,
     # each with disable_batch=False and disable_batch=True); every block takes the same edit:
+        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
+        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
-        open_ai_model_name="text-ada-003",
     # and each then asserts:
     assert await embeddings.create_embeddings(texts=["foo"]) == [[0.0023064255, -0.009327292, -0.0028842222]]
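===========illustrative sketch: why these tests patch tenacity's wait===========
The retry loop the tests above exercise (see the create_embedding_single diff further below) uses wait_random_exponential(min=15, max=60) with stop_after_attempt(15), so an always-failing call would sleep for minutes and log "Rate limited on the OpenAI embeddings API" 14 times (one sleep between each of the 15 attempts) before tenacity gives up. Patching wait_random_exponential.__call__ to return 0 keeps all 15 attempts but removes the sleeping. A self-contained sketch of the same trick; fails_forever is an invented stand-in, not code from the repo:

    import asyncio
    import tenacity
    from tenacity import AsyncRetrying, retry_if_exception_type, stop_after_attempt, wait_random_exponential

    class RateLimitError(Exception):  # stand-in for openai.RateLimitError
        pass

    async def fails_forever():
        async for attempt in AsyncRetrying(
            retry=retry_if_exception_type(RateLimitError),
            wait=wait_random_exponential(min=15, max=60),
            stop=stop_after_attempt(15),
        ):
            with attempt:
                raise RateLimitError()

    # In a real test prefer monkeypatch.setattr; a tenacity wait object is called with the retry state.
    tenacity.wait_random_exponential.__call__ = lambda self, retry_state: 0
    # asyncio.run(fails_forever()) now raises tenacity.RetryError almost instantly.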
===========row 5866===========
path: app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.__init__
type: Modified
project/commit: same as row 5865

ground_truth:
 <6>: self.embedding_dimensions = embedding_dimensions

main_code:
 [str],
 gpt4v_model: str,
 embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
 embedding_model: str,
+embedding_dimensions: int,
 sourcepage_field: str,
 content_field: str,
 query_language: str,
 query_speller: str,
 vision_endpoint: str,
 vision_token_provider: Callable[[], Awaitable[str]]
 ):
 <0>     self.search_client = search_client
 <1>     self.blob_container_client = blob_container_client
 <2>     self.openai_client = openai_client
 <3>     self.auth_helper = auth_helper
 <4>     self.embedding_model = embedding_model
 <5>     self.embedding_deployment = embedding_deployment
 <6>     self.sourcepage_field = sourcepage_field
 <7>     self.content_field = content_field
 <8>     self.gpt4v_deployment = gpt4v_deployment
 <9>     self.gpt4v_model = gpt4v_model
<10>     self.query_language = query_language
<11>     self.query_speller = query_speller
<12>     self.vision_endpoint = vision_endpoint
<13>     self.vision_token_provider = vision_token_provider
context:
===========unchanged ref 0===========
 at: approaches.approach.Approach
     __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper,
              query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str],
              embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str,
              vision_token_provider: Callable[[], Awaitable[str]])
 at: core.authentication
     AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str],
                          server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str],
                          require_access_control: bool=False)
 at: typing
     Awaitable = _alias(collections.abc.Awaitable, 1)
     Callable = _CallableType(collections.abc.Callable, 2)

===========changed refs 0-3===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, and tests.mocks MOCK_* constants diffs,
  as shown above)

===========changed ref 4===========
 # module: scripts.prepdocslib.embeddings
 class AzureOpenAIEmbeddingService(OpenAIEmbeddings):
     def __init__(
         self,
         open_ai_service: str,
         open_ai_deployment: str,
         open_ai_model_name: str,
+        open_ai_dimensions: int,
         credential: Union[AsyncTokenCredential, AzureKeyCredential],
         disable_batch: bool = False,
     ):
+        super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)
-        super().__init__(open_ai_model_name, disable_batch)
         self.open_ai_service = open_ai_service
         self.open_ai_deployment = open_ai_deployment
         self.credential = credential

===========changed ref 5===========
 # module: scripts.prepdocslib.searchmanager
 class SearchManager:
     def __init__(
         self,
         search_info: SearchInfo,
         search_analyzer_name: Optional[str] = None,
         use_acls: bool = False,
         use_int_vectorization: bool = False,
         embeddings: Optional[OpenAIEmbeddings] = None,
         search_images: bool = False,
     ):
         self.search_info = search_info
         self.search_analyzer_name = search_analyzer_name
         self.use_acls = use_acls
         self.use_int_vectorization = use_int_vectorization
         self.embeddings = embeddings
+        # Integrated vectorization uses the ada-002 model with 1536 dimensions
+        self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536
         self.search_images = search_images
===========changed ref 6===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     def create_embeddings(self, texts: List[str]) -> List[List[float]]:
+        dimensions_args: ExtraArgs = (
+            {"dimensions": self.open_ai_dimensions}
+            if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)
+            else {}
+        )
+
         if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:
+            return await self.create_embedding_batch(texts, dimensions_args)
-            return await self.create_embedding_batch(texts)

+        return [await self.create_embedding_single(text, dimensions_args) for text in texts]
-        return [await self.create_embedding_single(text) for text in texts]

===========changed ref 7===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
+    def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:
-    def create_embedding_single(self, text: str) -> List[float]:
         client = await self.create_client()
         async for attempt in AsyncRetrying(
             retry=retry_if_exception_type(RateLimitError),
             wait=wait_random_exponential(min=15, max=60),
             stop=stop_after_attempt(15),
             before_sleep=self.before_retry_sleep,
         ):
             with attempt:
+                emb_response = await client.embeddings.create(
+                    model=self.open_ai_model_name, input=text, **dimensions_args
+                )
-                emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)
                 logger.info("Computed embedding for text section. Character count: %d", len(text))

         return emb_response.data[0].embedding

===========changed ref 8===========
 # module: scripts.prepdocslib.embeddings
 class OpenAIEmbeddings(ABC):
     """
     Contains common logic across both OpenAI and Azure OpenAI embedding services
     Can split source text into batches for more efficient embedding calls
     """

+    SUPPORTED_BATCH_AOAI_MODEL = {
+        "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16},
-    SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
+        "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16},
+        "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16},
+    }
+    SUPPORTED_DIMENSIONS_MODEL = {
+        "text-embedding-ada-002": False,
+        "text-embedding-3-small": True,
+        "text-embedding-3-large": True,
+    }
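===========illustrative sketch: the dimensions keyword===========
ExtraArgs is a total=False TypedDict, so the same call site can either pass dimensions or pass nothing at all (splatting an empty dict adds no arguments). OpenAI's embeddings endpoint accepts dimensions only for the text-embedding-3 family, which is exactly what SUPPORTED_DIMENSIONS_MODEL gates. A minimal sketch of the pattern; the embed helper and its prefix check are mine, not the repo's:

    from typing import Optional, TypedDict
    from openai import AsyncOpenAI

    class ExtraArgs(TypedDict, total=False):
        dimensions: int

    async def embed(client: AsyncOpenAI, model: str, text: str, dimensions: Optional[int]) -> list[float]:
        # ada-002 must not receive the dimensions parameter; text-embedding-3-* may.
        extra: ExtraArgs = {"dimensions": dimensions} if dimensions and model.startswith("text-embedding-3") else {}
        response = await client.embeddings.create(model=model, input=text, **extra)
        return response.data[0].embedding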
===========row 5867===========
path: tests.test_chatapproach/chat_approach
type: Modified
project/commit: same as row 5865

ground_truth:
 <7>: embedding_model=MOCK_EMBEDDING_MODEL_NAME,
      embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS,
      embedding_model="text-",

main_code:
 # module: tests.test_chatapproach
 @pytest.fixture
 def chat_approach():
 <0>     return ChatReadRetrieveReadApproach(
 <1>         search_client=None,
 <2>         auth_helper=None,
 <3>         openai_client=None,
 <4>         chatgpt_model="gpt-35-turbo",
 <5>         chatgpt_deployment="chat",
 <6>         embedding_deployment="embeddings",
 <7>         embedding_model="text-",
 <8>         sourcepage_field="",
 <9>         content_field="",
<10>         query_language="en-us",
<11>         query_speller="lexicon",
<12>     )

context:
===========unchanged ref 0===========
 at: _pytest.fixtures
     fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=...,
             params: Optional[Iterable[object]]=..., autouse: bool=...,
             ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]]=...,
             name: Optional[str]=...) -> FixtureFunction
     fixture(fixture_function: None=..., *, scope=..., params=..., autouse=..., ids=...,
             name: Optional[str]=None) -> FixtureFunctionMarker
 at: tests.mocks
     MOCK_EMBEDDING_DIMENSIONS = 1536
     MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
     MockAsyncSearchResultsIterator(search_text, vector_queries: Optional[list[VectorQuery]])
 at: typing.Mapping
     get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
     get(key: _KT) -> Optional[_VT_co]

===========changed refs 0-5===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, tests.mocks constants,
  AzureOpenAIEmbeddingService.__init__, and SearchManager.__init__ diffs, as shown above)
===========changed ref 6===========
 (create_embeddings diff, as shown above)
===========changed ref 7===========
 (RetrieveThenReadVisionApproach.__init__ diff — the same signature change as row 5866)
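===========illustrative sketch: consuming the chat_approach fixture===========
pytest resolves test arguments by fixture name, so any test in tests/test_chatapproach.py can take chat_approach as a parameter and receive the pre-wired ChatReadRetrieveReadApproach. A hypothetical test (the embedding_dimensions assertion assumes the attribute that the later rows' ground_truth adds to the constructor):

    def test_fixture_wiring(chat_approach):
        # The fixture's return value is injected by parameter name.
        assert chat_approach.chatgpt_model == "gpt-35-turbo"
        assert chat_approach.embedding_dimensions == 1536  # MOCK_EMBEDDING_DIMENSIONS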
===========row 5868===========
path: tests.test_chatapproach/test_search_results_filtering_by_scores
type: Modified
project/commit: same as row 5865

ground_truth:
 <7>: embedding_model=MOCK_EMBEDDING_MODEL_NAME,
      embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS,
      embedding_model="text-",

main_code:
 expected_result_count",
 [
     (0, 0, 1),
     (0, 2, 1),
     (0.03, 0, 1),
     (0.03, 2, 1),
     (1, 0, 0),
     (0, 4, 0),
     (1, 4, 0),
 ],
 )
 async def test_search_results_filtering_by_scores(
     monkeypatch, minimum_search_score, minimum_reranker_score, expected_result_count
 ):
 <0>     chat_approach = ChatReadRetrieveReadApproach(
 <1>         search_client=SearchClient(endpoint="", index_name="", credential=AzureKeyCredential("")),
 <2>         auth_helper=None,
 <3>         openai_client=None,
 <4>         chatgpt_model="gpt-35-turbo",
 <5>         chatgpt_deployment="chat",
 <6>         embedding_deployment="embeddings",
 <7>         embedding_model="text-",
 <8>         sourcepage_field="",
 <9>         content_field="",
<10>         query_language="en-us",
<11>         query_speller="lexicon",
<12>     )
<13>
<14>     monkeypatch.setattr(SearchClient, "search", mock_search)
<15>
<16>     filtered_results = await chat_approach.search(
<17>         top=10,
<18>         query_text="test query",
<19>         filter=None,
<20>         vectors=[],
<21>         use_semantic_ranker=True,
<22>         use_semantic_captions=True,
<23>         minimum_search_score=minimum_search_score,
<24>         minimum_reranker_score=minimum_reranker_score,
<25>     )
<26>
<27>     assert (
<28>         len(filtered_results) == expected_result_count
<29>     ), f"Expected {expected_result_count} results with minimum_search_score={minimum_search_score} and minimum_reranker_score={minimum_reranker_score}"
<30>
context:
===========unchanged ref 0===========
 at: _pytest.mark.structures
     MARK_GEN = MarkGenerator(_ispytest=True)
 at: _pytest.mark.structures.MarkGenerator
     skip: _SkipMarkDecorator
     skipif: _SkipifMarkDecorator
     xfail: _XfailMarkDecorator
     parametrize: _ParametrizeMarkDecorator
     usefixtures: _UsefixturesMarkDecorator
     filterwarnings: _FilterwarningsMarkDecorator
 at: _pytest.monkeypatch
     monkeypatch() -> Generator["MonkeyPatch", None, None]
 at: tests.mocks
     MOCK_EMBEDDING_DIMENSIONS = 1536
     MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
 at: tests.test_chatapproach
     mock_search(*args, **kwargs)
 at: tests.test_chatapproach.test_get_messages_from_history_few_shots
     user_query_request = "What does a Product manager do?"
     messages = chat_approach.get_messages_from_history(
         system_prompt=chat_approach.query_prompt_template,
         model_id=chat_approach.chatgpt_model,
         user_content=user_query_request,
         history=[],
         max_tokens=chat_approach.chatgpt_token_limit - len(user_query_request),
         few_shots=chat_approach.query_prompt_few_shots,
     )

===========changed ref 0===========
 # module: tests.test_chatapproach
 @pytest.fixture
 def chat_approach():
     return ChatReadRetrieveReadApproach(
         search_client=None,
         auth_helper=None,
         openai_client=None,
         chatgpt_model="gpt-35-turbo",
         chatgpt_deployment="chat",
         embedding_deployment="embeddings",
+        embedding_model=MOCK_EMBEDDING_MODEL_NAME,
+        embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS,
-        embedding_model="text-",
         sourcepage_field="",
         content_field="",
         query_language="en-us",
         query_speller="lexicon",
     )
===========changed refs 1-7===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, tests.mocks constants,
  AzureOpenAIEmbeddingService.__init__, SearchManager.__init__, and create_embeddings diffs, as shown above)
===========changed ref 8===========
 (RetrieveThenReadVisionApproach.__init__ diff, as in row 5866)
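===========illustrative sketch: the filtering under test===========
The parametrized cases pin the boundary behavior: a document survives only if it clears both the search-score and reranker-score minimums, so the (1, 0) and (0, 4) cases empty the mocked result set. One plausible shape of that filter — the repo's actual search() body is not in this dump, and the @search.* key names are an assumption about the Azure AI Search result payload:

    def filter_by_scores(results: list[dict], minimum_search_score: float, minimum_reranker_score: float) -> list[dict]:
        kept = []
        for doc in results:
            search_score = doc.get("@search.score") or 0
            reranker_score = doc.get("@search.reranker_score") or 0
            # Both thresholds must be met for the document to be returned.
            if search_score >= minimum_search_score and reranker_score >= minimum_reranker_score:
                kept.append(doc)
        return kept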
"}}},{"rowIdx":5869,"cells":{"path":{"kind":"string","value":"app.backend.approaches.approach/Approach.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <7>: self.embedding_dimensions = embedding_dimensions\n"},"main_code":{"kind":"string","value":" ai_client: AsyncOpenAI,\n auth_helper: AuthenticationHelper,\n query_language: Optional[str],\n query_speller: Optional[str],\n embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode=\"text\"\n embedding_model: str,\n + embedding_dimensions: int,\n openai_host: str,\n vision_endpoint: str,\n vision_token_provider: Callable[[], Awaitable[str]],\n ):\n <0> self.search_client = search_client\n <1> self.openai_client = openai_client\n <2> self.auth_helper = auth_helper\n <3> self.query_language = query_language\n <4> self.query_speller = query_speller\n <5> self.embedding_deployment = embedding_deployment\n <6> self.embedding_model = embedding_model\n <7> self.openai_host = openai_host\n <8> self.vision_endpoint = vision_endpoint\n <9> self.vision_token_provider = vision_token_provider\n<10> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: abc\n ABC()\n \n at: core.authentication\n AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False)\n \n at: dataclasses\n dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) 
===========changed refs 0-5===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, tests.mocks constants,
  AzureOpenAIEmbeddingService.__init__, and SearchManager.__init__ diffs, as shown above)
===========changed ref 6===========
 (chat_approach fixture diff, as in row 5868)
===========changed ref 7===========
 (create_embeddings diff, as shown above)
===========changed ref 8===========
 (RetrieveThenReadVisionApproach.__init__ diff, as in row 5866)
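===========illustrative sketch: reading a ground_truth patch===========
Each row pairs <N>-numbered main_code lines with a ground_truth keyed by those markers; this row's single entry inserts self.embedding_dimensions = embedding_dimensions at marker <7>. The dump does not spell out replace-vs-insert semantics, so the helper below implements only the insert-before-marker reading that fits pure additions like this one; the function and the list-of-lines representation are mine, not part of the dataset:

    def apply_insertions(lines: list[str], patch: dict[int, str]) -> list[str]:
        # lines[i] corresponds to marker <i>; patch[i] is inserted just before it.
        out: list[str] = []
        for i, line in enumerate(lines):
            if i in patch:
                out.append(patch[i])
            out.append(line)
        return out

    body = ["self.embedding_model = embedding_model", "self.openai_host = openai_host"]
    patched = apply_insertions(body, {1: "self.embedding_dimensions = embedding_dimensions"})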
===========row 5870===========
path: app.backend.approaches.approach/Approach.compute_text_embedding
type: Modified
project/commit: same as row 5865

ground_truth:
 <0>: SUPPORTED_DIMENSIONS_MODEL = {
          "text-embedding-ada-002": False,
          "text-embedding-3-small": True,
          "text-embedding-3-large": True,
      }

      class ExtraArgs(TypedDict, total=False):
          dimensions: int

      dimensions_args: ExtraArgs = (
          {"dimensions": self.embedding_dimensions} if SUPPORTED_DIMENSIONS_MODEL[self.embedding_model] else {}
      )
 <4>: **dimensions_args,

main_code:
 # module: app.backend.approaches.approach
 class Approach(ABC):
     def compute_text_embedding(self, q: str):
 <0>     embedding = await self.openai_client.embeddings.create(
 <1>         # Azure OpenAI takes the deployment name as the model name
 <2>         model=self.embedding_deployment if self.embedding_deployment else self.embedding_model,
 <3>         input=q,
 <4>     )
 <5>     query_vector = embedding.data[0].embedding
 <6>     return VectorizedQuery(vector=query_vector, k_nearest_neighbors=50, fields="embedding")
 <7>

context:
===========unchanged ref 0===========
 at: os.path
     splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr]
     splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr]
===========changed ref 0===========
 ai_client: AsyncOpenAI,
 auth_helper: AuthenticationHelper,
 query_language: Optional[str],
 query_speller: Optional[str],
 embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
 embedding_model: str,
+embedding_dimensions: int,
 openai_host: str,
 vision_endpoint: str,
 vision_token_provider: Callable[[], Awaitable[str]],
 ):
     self.search_client = search_client
     self.openai_client = openai_client
     self.auth_helper = auth_helper
     self.query_language = query_language
     self.query_speller = query_speller
     self.embedding_deployment = embedding_deployment
     self.embedding_model = embedding_model
+    self.embedding_dimensions = embedding_dimensions
     self.openai_host = openai_host
     self.vision_endpoint = vision_endpoint
     self.vision_token_provider = vision_token_provider

===========changed refs 1-5===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, tests.mocks constants,
  and AzureOpenAIEmbeddingService.__init__ diffs, as shown above)
===========changed ref 6===========
 (SearchManager.__init__ diff, as shown above)
===========changed ref 7===========
 (chat_approach fixture diff, as in row 5868)
===========changed ref 8===========
 (create_embeddings diff, as shown above)
===========changed ref 9===========
 (RetrieveThenReadVisionApproach.__init__ diff, as in row 5866)
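===========illustrative sketch: from query text to VectorizedQuery===========
compute_text_embedding converts the user's query into the vector clause of an Azure AI Search request; after this commit the embedding call can also pin the vector length via **dimensions_args. A standalone sketch of the call shape in the main_code above (client construction elided):

    from azure.search.documents.models import VectorizedQuery

    async def to_vector_query(openai_client, model: str, q: str) -> VectorizedQuery:
        embedding = await openai_client.embeddings.create(model=model, input=q)
        query_vector = embedding.data[0].embedding
        # 50 nearest neighbors against the index's "embedding" field, as in the row above.
        return VectorizedQuery(vector=query_vector, k_nearest_neighbors=50, fields="embedding")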
===========row 5871===========
path: tests.test_searchmanager/test_update_content_with_embeddings
type: Modified
project/commit: same as row 5865

ground_truth:
<32>: open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
      open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
      open_ai_model_name="text-ada-003",

main_code:
 # module: tests.test_searchmanager
 @pytest.mark.asyncio
 async def test_update_content_with_embeddings(monkeypatch, search_info):
 <0>     async def mock_create_client(*args, **kwargs):
 <1>         # From https://platform.openai.com/docs/api-reference/embeddings/create
 <2>         return MockClient(
 <3>             embeddings_client=MockEmbeddingsClient(
 <4>                 create_embedding_response=openai.types.CreateEmbeddingResponse(
 <5>                     object="list",
 <6>                     data=[
 <7>                         openai.types.Embedding(
 <8>                             embedding=[
 <9>                                 0.0023064255,
<10>                                 -0.009327292,
<11>                                 -0.0028842222,
<12>                             ],
<13>                             index=0,
<14>                             object="embedding",
<15>                         )
<16>                     ],
<17>                     model="text-embedding-ada-002",
<18>                     usage=Usage(prompt_tokens=8, total_tokens=8),
<19>                 )
<20>             )
<21>         )
<22>
<23>     documents_uploaded = []
<24>
<25>     async def mock_upload_documents(self, documents):
<26>         documents_uploaded.extend(documents)
<27>
<28>     monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents)
<29>     embeddings = AzureOpenAIEmbeddingService(
<30>         open_ai_service="x",
<31>         open_ai_deployment="x",
<32>         open_ai_model_name="text-ada-003",
<33>         credential=AzureKeyCredential("test"),
<34>         disable_batch=True,
<35>     )
<36>     monkeypatch.setattr(embeddings, "create_client", mock_create_client)
<37>     manager = SearchManager(
<38>         search_info,
<39>         embeddings=embeddings,
<40>     )
<41>
<42>     test_io = io.BytesIO(b"test content")
<43>     test_io.name = "test/foo.pdf"
<44>     file = File(test_io)
<45>
<46>     await manager.update_content(
<47>         [

context:
===========below chunk 0===========
 # module: tests.test_searchmanager
 @pytest.mark.asyncio
 async def test_update_content_with_embeddings(monkeypatch, search_info):
 # offset: 1
             split_page=SplitPage(
                 page_num=0,
                 text="test content",
             ),
             content=file,
             category="test",
         )
     ]
 )

 assert len(documents_uploaded) == 1, "It should have uploaded one document"
 assert documents_uploaded[0]["embedding"] == [
     0.0023064255,
     -0.009327292,
     -0.0028842222,
 ]
===========unchanged ref 0===========
 at: _pytest.mark.structures
     MARK_GEN = MarkGenerator(_ispytest=True)
 at: _pytest.monkeypatch
     monkeypatch() -> Generator["MonkeyPatch", None, None]
 at: io
     BytesIO(initial_bytes: bytes=...)
 at: io.BytesIO
     name: Any
 at: scripts.prepdocslib.embeddings
     AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str,
                                 open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential],
                                 disable_batch: bool=False)
 at: scripts.prepdocslib.listfilestrategy
     File(content: IO, acls: Optional[dict[str, list]]=None)
 at: scripts.prepdocslib.page
     SplitPage(page_num: int, text: str)
 at: scripts.prepdocslib.searchmanager
     Section(split_page: SplitPage, content: File, category: Optional[str]=None)
     SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False,
                   use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None,
                   search_images: bool=False)
 at: scripts.prepdocslib.searchmanager.SearchManager
     update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None)
 at: tests.test_searchmanager
     MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse)
     MockClient(embeddings_client)

===========changed ref 0===========
 # module: tests.test_searchmanager
+ @pytest.fixture
+ def embeddings_service(monkeypatch):
+     async def mock_create_client(*args, **kwargs):
+         # From https://platform.openai.com/docs/api-reference/embeddings/create
+         return MockClient(
+             embeddings_client=MockEmbeddingsClient(
+                 create_embedding_response=openai.types.CreateEmbeddingResponse(
+                     object="list",
+                     data=[
+                         openai.types.Embedding(
+                             embedding=[
+                                 0.0023064255,
+                                 -0.009327292,
+                                 -0.0028842222,
+                             ],
+                             index=0,
+                             object="embedding",
+                         )
+                     ],
+                     model="text-embedding-ada-002",
+                     usage=Usage(prompt_tokens=8, total_tokens=8),
+                 )
+             )
+         )
+
+     embeddings = AzureOpenAIEmbeddingService(
+         open_ai_service="x",
+         open_ai_deployment="x",
+         open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
+         open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+         credential=AzureKeyCredential("test"),
+         disable_batch=True,
+     )
+     monkeypatch.setattr(embeddings, "create_client", mock_create_client)
+     return embeddings

===========changed refs 1-6===========
 (ExtraArgs, OpenAIEmbeddings.__init__, OpenAIEmbeddingService.__init__, tests.mocks constants,
  AzureOpenAIEmbeddingService.__init__, and SearchManager.__init__ diffs, as shown above)
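===========illustrative sketch: why SearchManager records the dimensions===========
The vector field of the search index must be declared with the same dimensionality the embedding model emits (1536 for text-embedding-ada-002, hence the fallback in SearchManager.__init__). A hedged sketch of such a field definition using azure-search-documents 11.4-style models; the field and profile names here are illustrative, not taken from the repo:

    from azure.search.documents.indexes.models import SearchField, SearchFieldDataType

    def embedding_field(dimensions: int) -> SearchField:
        # The index schema is sized from the embedding service's open_ai_dimensions.
        return SearchField(
            name="embedding",
            type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
            searchable=True,
            vector_search_dimensions=dimensions,
            vector_search_profile_name="embedding_config",
        )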
"}}},{"rowIdx":5872,"cells":{"path":{"kind":"string","value":"app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <7>: self.embedding_dimensions = embedding_dimensions\n"},"main_code":{"kind":"string","value":" ai_client: AsyncOpenAI,\n chatgpt_model: str,\n chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI\n embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode=\"text\"\n embedding_model: str,\n + embedding_dimensions: int,\n sourcepage_field: str,\n content_field: str,\n query_language: str,\n query_speller: str,\n ):\n <0> self.search_client = search_client\n <1> self.openai_client = openai_client\n <2> self.auth_helper = auth_helper\n <3> self.chatgpt_model = chatgpt_model\n <4> self.chatgpt_deployment = chatgpt_deployment\n <5> self.embedding_deployment = embedding_deployment\n <6> self.embedding_model = embedding_model\n <7> self.sourcepage_field = sourcepage_field\n <8> self.content_field = content_field\n <9> self.query_language = query_language\n<10> self.query_speller = query_speller\n<11> self.chatgpt_token_limit = get_token_limit(chatgpt_model)\n<12> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: approaches.approach.Approach\n __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]])\n \n at: core.authentication\n AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - super().__init__(open_ai_model_name, disable_batch)\n self.credential = credential\n self.organization = organization\n \n===========changed ref 3===========\n # module: tests.mocks\n + 
===========changed refs 4-7===========
 (AzureOpenAIEmbeddingService.__init__, SearchManager.__init__, Approach.__init__, and chat_approach fixture diffs,
  as shown above)
===========changed ref 8===========
 (create_embeddings diff, as shown above)
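===========illustrative sketch: batch limits===========
SUPPORTED_BATCH_AOAI_MODEL caps each embeddings request at 8,100 tokens and 16 inputs for all three models, and create_embedding_batch now forwards the same dimensions_args. The repo's batch-splitting code is not part of this dump; a plausible version of it using tiktoken for token counting:

    import tiktoken

    def split_batches(texts: list[str], model: str, token_limit: int = 8100, max_batch_size: int = 16) -> list[list[str]]:
        encoding = tiktoken.encoding_for_model(model)
        batches: list[list[str]] = []
        batch: list[str] = []
        batch_tokens = 0
        for text in texts:
            tokens = len(encoding.encode(text))
            # Start a new batch once either the token or the size cap would be exceeded.
            if batch and (batch_tokens + tokens > token_limit or len(batch) == max_batch_size):
                batches.append(batch)
                batch, batch_tokens = [], 0
            batch.append(text)
            batch_tokens += tokens
        if batch:
            batches.append(batch)
        return batches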
await self.create_embedding_batch(texts)\n \n + return [await self.create_embedding_single(text, dimensions_args) for text in texts]\n - return [await self.create_embedding_single(text) for text in texts]\n "}}},{"rowIdx":5873,"cells":{"path":{"kind":"string","value":"app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"7a7881e2269d4a8bdf820eb6b33f723019d07a56"},"commit_message":{"kind":"string","value":"Add support for using new ada models with different dimensions (#1378)"},"ground_truth":{"kind":"string","value":" <6>: self.embedding_dimensions = embedding_dimensions\n"},"main_code":{"kind":"string","value":" ai_client: AsyncOpenAI,\n chatgpt_model: str,\n chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI\n embedding_model: str,\n embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode=\"text\"\n + embedding_dimensions: int,\n sourcepage_field: str,\n content_field: str,\n query_language: str,\n query_speller: str,\n ):\n <0> self.search_client = search_client\n <1> self.chatgpt_deployment = chatgpt_deployment\n <2> self.openai_client = openai_client\n <3> self.auth_helper = auth_helper\n <4> self.chatgpt_model = chatgpt_model\n <5> self.embedding_model = embedding_model\n <6> self.chatgpt_deployment = chatgpt_deployment\n <7> self.embedding_deployment = embedding_deployment\n <8> self.sourcepage_field = sourcepage_field\n <9> self.content_field = content_field\n<10> self.query_language = query_language\n<11> self.query_speller = query_speller\n<12> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: approaches.approach.Approach\n __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]])\n \n at: core.authentication\n AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False)\n \n \n===========changed ref 0===========\n # module: scripts.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 1===========\n # module: scripts.prepdocslib.embeddings\n class OpenAIEmbeddings(ABC):\n + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):\n - def __init__(self, open_ai_model_name: str, disable_batch: bool = False):\n self.open_ai_model_name = open_ai_model_name\n + self.open_ai_dimensions = open_ai_dimensions\n self.disable_batch = disable_batch\n \n===========changed ref 2===========\n scripts.prepdocslib.embeddings\n class OpenAIEmbeddingService(OpenAIEmbeddings):\n def __init__(\n + self,\n + open_ai_model_name: str,\n + open_ai_dimensions: int,\n + credential: str,\n + organization: Optional[str] = None,\n + disable_batch: bool = False,\n - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False\n ):\n + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)\n - 
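===========example: gating the dimensions argument===========
The diffs above thread a `dimensions` value through to the embeddings call only for models that accept it. A minimal, self-contained sketch of the same pattern follows; the `SUPPORTED_DIMENSIONS_MODEL` mapping and `embed` helper here are illustrative stand-ins for the real code in prepdocslib.embeddings, not the repo's exact implementation. Only the text-embedding-3 family accepts `dimensions`; text-embedding-ada-002 always returns 1536-dimensional vectors.

from typing import TypedDict

from openai import AsyncOpenAI


class ExtraArgs(TypedDict, total=False):
    dimensions: int


# Which models accept a "dimensions" argument (assumption for this sketch)
SUPPORTED_DIMENSIONS_MODEL = {
    "text-embedding-ada-002": False,
    "text-embedding-3-small": True,
    "text-embedding-3-large": True,
}


async def embed(client: AsyncOpenAI, model: str, dimensions: int, text: str) -> list[float]:
    # Splat the TypedDict so the kwarg is simply absent for older models
    extra: ExtraArgs = {"dimensions": dimensions} if SUPPORTED_DIMENSIONS_MODEL.get(model) else {}
    response = await client.embeddings.create(model=model, input=text, **extra)
    return response.data[0].embedding

Using `TypedDict(total=False)` plus `**extra` keeps a single call site valid whether or not the optional argument is present.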
===========row 5874===========
path: app.backend.approaches.chatreadretrievereadvision/ChatReadRetrieveReadVisionApproach.__init__ (Modified)
project: Azure-Samples~azure-search-openai-demo
commit: 7a7881e2269d4a8bdf820eb6b33f723019d07a56 — Add support for using new ada models with different dimensions (#1378)
===========ground truth===========
 <8>: self.embedding_dimensions = embedding_dimensions
===========main code===========
          gpt4v_deployment: Optional[str],  # Not needed for non-Azure OpenAI
          gpt4v_model: str,
          embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
          embedding_model: str,
+         embedding_dimensions: int,
          sourcepage_field: str,
          content_field: str,
          query_language: str,
          query_speller: str,
          vision_endpoint: str,
          vision_token_provider: Callable[[], Awaitable[str]]
      ):
 <0>      self.search_client = search_client
 <1>      self.blob_container_client = blob_container_client
 <2>      self.openai_client = openai_client
 <3>      self.auth_helper = auth_helper
 <4>      self.gpt4v_deployment = gpt4v_deployment
 <5>      self.gpt4v_model = gpt4v_model
 <6>      self.embedding_deployment = embedding_deployment
 <7>      self.embedding_model = embedding_model
 <8>      self.sourcepage_field = sourcepage_field
 <9>      self.content_field = content_field
<10>      self.query_language = query_language
<11>      self.query_speller = query_speller
<12>      self.vision_endpoint = vision_endpoint
<13>      self.vision_token_provider = vision_token_provider
<14>      self.chatgpt_token_limit = get_token_limit(gpt4v_model)
<15>
===========unchanged ref 0===========
  at: approaches.approach.Approach
      __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]])
  at: core.authentication
      AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False)
  at: typing
      Awaitable = _alias(collections.abc.Awaitable, 1)
      Callable = _CallableType(collections.abc.Callable, 2)
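===========example: the vision_token_provider contract===========
The constructor above takes a `vision_token_provider: Callable[[], Awaitable[str]]` — a zero-argument callable that yields an access token when awaited. A small sketch of one way to satisfy that contract with azure-identity follows; the factory name and scope wiring are assumptions for illustration, not the app's actual setup code.

import asyncio
from typing import Awaitable, Callable

from azure.identity.aio import DefaultAzureCredential


def make_token_provider(scope: str) -> Callable[[], Awaitable[str]]:
    credential = DefaultAzureCredential()

    async def get_token() -> str:
        # get_token returns an AccessToken; .token is the raw bearer string
        return (await credential.get_token(scope)).token

    return get_token


async def main() -> None:
    provider = make_token_provider("https://cognitiveservices.azure.com/.default")
    print(await provider())


asyncio.run(main())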
chatgpt_model=\"gpt-35-turbo\",\n chatgpt_deployment=\"chat\",\n embedding_deployment=\"embeddings\",\n + embedding_model=MOCK_EMBEDDING_MODEL_NAME,\n + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS,\n - embedding_model=\"text-\",\n sourcepage_field=\"\",\n content_field=\"\",\n query_language=\"en-us\",\n query_speller=\"lexicon\",\n )\n \n===========changed ref 8===========\n ai_client: AsyncOpenAI,\n chatgpt_model: str,\n chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI\n embedding_model: str,\n embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode=\"text\"\n + embedding_dimensions: int,\n sourcepage_field: str,\n content_field: str,\n query_language: str,\n query_speller: str,\n ):\n self.search_client = search_client\n self.chatgpt_deployment = chatgpt_deployment\n self.openai_client = openai_client\n self.auth_helper = auth_helper\n self.chatgpt_model = chatgpt_model\n self.embedding_model = embedding_model\n + self.embedding_dimensions = embedding_dimensions\n self.chatgpt_deployment = chatgpt_deployment\n self.embedding_deployment = embedding_deployment\n self.sourcepage_field = sourcepage_field\n self.content_field = content_field\n self.query_language = query_language\n self.query_speller = query_speller\n "}}},{"rowIdx":5875,"cells":{"path":{"kind":"string","value":"tests.test_blob_manager/test_upload_and_remove_all"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"853cc726ca9d6e0d10b595024fef4f5fd70826b4"},"commit_message":{"kind":"string","value":"Optional upload documents feature (#1395)"},"ground_truth":{"kind":"string","value":" <2>: print(f.content.name)\n"},"main_code":{"kind":"string","value":" # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):\n <0> with NamedTemporaryFile(suffix=\".pdf\") as temp_file:\n <1> f = File(temp_file.file)\n <2> print(f.content.name)\n <3> filename = os.path.basename(f.content.name)\n <4> \n <5> # Set up mocks used by upload_blob\n <6> async def mock_exists(*args, **kwargs):\n <7> return True\n <8> \n <9> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.exists\", mock_exists)\n<10> \n<11> async def mock_upload_blob(self, name, *args, **kwargs):\n<12> assert name == filename\n<13> return True\n<14> \n<15> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.upload_blob\", mock_upload_blob)\n<16> \n<17> await blob_manager.upload_blob(f)\n<18> \n<19> # Set up mocks used by remove_blob\n<20> def mock_list_blob_names(*args, **kwargs):\n<21> assert kwargs.get(\"name_starts_with\") is None\n<22> \n<23> class AsyncBlobItemsIterator:\n<24> def __init__(self, file):\n<25> self.files = [file]\n<26> \n<27> def __aiter__(self):\n<28> return self\n<29> \n<30> async def __anext__(self):\n<31> if self.files:\n<32> return self.files.pop()\n<33> raise StopAsyncIteration\n<34> \n<35> return AsyncBlobItemsIterator(filename)\n<36> \n<37> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.list_blob_names\", mock_list_blob_names)\n<38> \n<39> async def mock_delete_blob(self, name, *args, **kwargs):\n<40> assert name == filename\n<41> return True\n<42> \n<43> monkeypatch."},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_blob_manager\n 
@pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):\n # offset: 1\n \n await blob_manager.remove_blob()\n \n \n===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.mark.structures.MarkGenerator\n skip: _SkipMarkDecorator\n \n skipif: _SkipifMarkDecorator\n \n xfail: _XfailMarkDecorator\n \n parametrize: _ParametrizeMarkDecorator\n \n usefixtures: _UsefixturesMarkDecorator\n \n filterwarnings: _FilterwarningsMarkDecorator\n \n at: _pytest.monkeypatch\n monkeypatch() -> Generator[\"MonkeyPatch\", None, None]\n \n at: os.path\n basename(p: _PathLike[AnyStr]) -> AnyStr\n basename(p: AnyStr) -> AnyStr\n \n at: sys\n version_info: _version_info\n \n at: sys._version_info\n major: int\n \n minor: int\n \n micro: int\n \n releaselevel: str\n \n serial: int\n \n \n===========unchanged ref 1===========\n at: tempfile\n NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any]\n NamedTemporaryFile(mode: Literal[\"r\", \"w\", \"a\", \"x\", \"r+\", \"w+\", \"a+\", \"x+\", \"rt\", \"wt\", \"at\", \"xt\", \"r+t\", \"w+t\", \"a+t\", \"x+t\"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str]\n NamedTemporaryFile(mode: Literal[\"rb\", \"wb\", \"ab\", \"xb\", \"r+b\", \"w+b\", \"a+b\", \"x+b\"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
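===========example: monkeypatching an async method===========
The test above swaps network-bound coroutine methods for local fakes via `monkeypatch.setattr`. The same pattern in isolation, assuming pytest-asyncio is installed as in these tests; `Client` and `upload` are illustrative stand-ins rather than the repo's classes.

import pytest


class Client:
    async def upload(self, name: str) -> bool:
        raise RuntimeError("would hit the network")


@pytest.mark.asyncio
async def test_upload(monkeypatch):
    calls = []

    # The replacement must also be a coroutine function so `await` still works
    async def fake_upload(self, name: str) -> bool:
        calls.append(name)
        return True

    monkeypatch.setattr(Client, "upload", fake_upload)
    assert await Client().upload("a.pdf") is True
    assert calls == ["a.pdf"]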
===========row 5876===========
path: tests.test_content_file/test_content_file (Modified)
project: Azure-Samples~azure-search-openai-demo
commit: 853cc726ca9d6e0d10b595024fef4f5fd70826b4 — Optional upload documents feature (#1395)
===========ground truth===========
 <0>: class MockAiohttpClientResponse404(aiohttp.ClientResponse):
 <1>:     def __init__(self, url, body_bytes, headers=None):
 <2>:         self._body = body_bytes
 <3>:         self._headers = headers
 <4>:         self._cache = {}
 <5>:         self.status = 404
 <6>:         self.reason = "Not Found"
 <7>:         self._url = url
 <8>:
 <9>: class MockAiohttpClientResponse(aiohttp.ClientResponse):
<10>:     def __init__(self, url, body_bytes, headers=None):
<11>:         self._body = body_bytes
<12>:         self._headers = headers
<13>:         self._cache = {}
<14>:         self.status = 200
<15>:         self.reason = "OK"
<16>:         self._url = url
<17>:
<20>: if request.url.endswith("notfound.pdf") or request.url.endswith("userdoc.pdf"):
      if request.url.endswith("notfound.pdf"):
<48>: # Then we can plug this into any SDK via kwargs:
===========main code===========
  # module: tests.test_content_file
  @pytest.mark.asyncio
  async def test_content_file(monkeypatch, mock_env, mock_acs_search):
 <0>  class MockAiohttpClientResponse404(aiohttp.ClientResponse):
 <1>      def __init__(self, url, body_bytes, headers=None):
 <2>          self._body = body_bytes
 <3>          self._headers = headers
 <4>          self._cache = {}
 <5>          self.status = 404
 <6>          self.reason = "Not Found"
 <7>          self._url = url
 <8>
 <9>  class MockAiohttpClientResponse(aiohttp.ClientResponse):
<10>      def __init__(self, url, body_bytes, headers=None):
<11>          self._body = body_bytes
<12>          self._headers = headers
<13>          self._cache = {}
<14>          self.status = 200
<15>          self.reason = "OK"
<16>          self._url = url
<17>
<18>  class MockTransport(AsyncHttpTransport):
<19>      async def send(self, request: HttpRequest, **kwargs) -> AioHttpTransportResponse:
<20>          if request.url.endswith("notfound.pdf"):
<21>              raise ResourceNotFoundError(MockAiohttpClientResponse404(request.url, b""))
<22>          else:
<23>              return AioHttpTransportResponse(
<24>                  request,
<25>                  MockAiohttpClientResponse(
<26>                      request.url,
<27>                      b"test content",
<28>                      {
<29>                          "Content-Type": "application/octet-stream",
<30>                          "Content-Range": "bytes 0-27/28",
<31>                          "Content-Length": "28",
<32>                      },
<33>                  ),
<34>              )
<35>
<36>      async def __aenter__(self):
<37>          return self
<38>
<39>      async def __aexit__(self, *args):
<40>          pass
<41>
<42>      async def open(self):
<43>          pass
<44>
<45>      async def close(self):
<46>          pass
<47>
<48>  # Then we can plug this into any SDK via kwargs:
<49>  blob_client = BlobServiceClient(
<50>      f"https://{
===========below chunk 0===========
  # module: tests.test_content_file
  @pytest.mark.asyncio
  async def test_content_file(monkeypatch, mock_env, mock_acs_search):
  # offset: 1
      credential=MockAzureCredential(),
      transport=MockTransport(),
      retry_total=0,  # Necessary to avoid unnecessary network requests during tests
  )
  blob_container_client = blob_client.get_container_client(os.environ["AZURE_STORAGE_CONTAINER"])

  quart_app = app.create_app()
  async with quart_app.test_app() as test_app:
      quart_app.config.update({"blob_container_client": blob_container_client})

      client = test_app.test_client()
      response = await client.get("/content/notfound.pdf")
      assert response.status_code == 404

      response = await client.get("/content/role_library.pdf")
      assert response.status_code == 200
      assert response.headers["Content-Type"] == "application/pdf"
      assert await response.get_data() == b"test content"

      response = await client.get("/content/role_library.pdf#page=10")
      assert response.status_code == 200
      assert response.headers["Content-Type"] == "application/pdf"
      assert await response.get_data() == b"test content"
===========unchanged ref 0===========
  at: aiohttp.client_reqrep
      ClientResponse(method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession")
  at: aiohttp.client_reqrep.ClientResponse
      version = None  # HTTP-Version
      status: int = None  # type: ignore[assignment]  # Status-Code
      reason = None  # Reason-Phrase
      content: StreamReader = None  # type: ignore[assignment]  # Payload stream
      _headers: "CIMultiDictProxy[str]" = None  # type: ignore[assignment]
      _raw_headers: RawHeaders = None  # type: ignore[assignment]  # Response raw headers
      _connection = None  # current connection
      _source_traceback: Optional[traceback.StackSummary] = None
      _closed = True  # to allow __del__ for non-initialized properly response
      _released = False
      __init__(self, method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession") -> None
===========changed ref 0===========
  # module: tests.test_blob_manager
  @pytest.mark.asyncio
  @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher")
  async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):
      with NamedTemporaryFile(suffix=".pdf") as temp_file:
          f = File(temp_file.file)
-         print(f.content.name)
          filename = os.path.basename(f.content.name)

          # Set up mocks used by upload_blob
          async def mock_exists(*args, **kwargs):
              return True

          monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists)

          async def mock_upload_blob(self, name, *args, **kwargs):
              assert name == filename
              return True

          monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob)

          await blob_manager.upload_blob(f)

          # Set up mocks used by remove_blob
          def mock_list_blob_names(*args, **kwargs):
              assert kwargs.get("name_starts_with") is None

              class AsyncBlobItemsIterator:
                  def __init__(self, file):
                      self.files = [file]

                  def __aiter__(self):
                      return self

                  async def __anext__(self):
                      if self.files:
                          return self.files.pop()
                      raise StopAsyncIteration

              return AsyncBlobItemsIterator(filename)

          monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names)

          async def mock_delete_blob(self, name, *args, **kwargs):
              assert name == filename
              return True

          monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob)

          await blob_manager.remove_blob()
===========row 5877===========
path: tests.test_searchmanager/test_remove_content (Modified)
project: Azure-Samples~azure-search-openai-demo
commit: 853cc726ca9d6e0d10b595024fef4f5fd70826b4 — Optional upload documents feature (#1395)
===========ground truth===========
 <0>: class AsyncSearchResultsIterator:
 <1>:     def __init__(self):
 <2>:         self.results = [
 <3>:             {
 <4>:                 "@search.score": 1,
 <5>:                 "id": "file-foo_pdf-666F6F2E706466-page-0",
 <6>:                 "content": "test content",
 <7>:                 "category": "test",
 <8>:                 "sourcepage": "foo.pdf#page=1",
 <9>:                 "sourcefile": "foo.pdf",
<10>:             }
<11>:         ]
<12>:
<13>:     def __aiter__(self):
<14>:         return self
<15>:
<16>:     async def __anext__(self):
<17>:         if len(self.results) == 0:
<18>:             raise StopAsyncIteration
<19>:         return self.results.pop()
<20>:
<21>:     async def get_count(self):
<22>:         return len(self.results)
<23>:
<24>: search_results = AsyncSearchResultsIterator(
      search_results = AsyncSearchResultsIterator()
<25>:     [
          {
              "@search.score": 1,
              "id": "file-foo_pdf-666F6F2E706466-page-0",
              "content": "test content",
              "category": "test",
              "sourcepage": "foo.pdf#page=1",
              "sourcefile": "foo.pdf",
          }
      ]
  )
===========main code===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content(monkeypatch, search_info):
 <0>  class AsyncSearchResultsIterator:
 <1>      def __init__(self):
 <2>          self.results = [
 <3>              {
 <4>                  "@search.score": 1,
 <5>                  "id": "file-foo_pdf-666F6F2E706466-page-0",
 <6>                  "content": "test content",
 <7>                  "category": "test",
 <8>                  "sourcepage": "foo.pdf#page=1",
 <9>                  "sourcefile": "foo.pdf",
<10>              }
<11>          ]
<12>
<13>      def __aiter__(self):
<14>          return self
<15>
<16>      async def __anext__(self):
<17>          if len(self.results) == 0:
<18>              raise StopAsyncIteration
<19>          return self.results.pop()
<20>
<21>      async def get_count(self):
<22>          return len(self.results)
<23>
<24>  search_results = AsyncSearchResultsIterator()
<25>
<26>  searched_filters = []
<27>
<28>  async def mock_search(self, *args, **kwargs):
<29>      self.filter = kwargs.get("filter")
<30>      searched_filters.append(self.filter)
<31>      return search_results
<32>
<33>  monkeypatch.setattr(SearchClient, "search", mock_search)
<34>
<35>  deleted_documents = []
<36>
<37>  async def mock_delete_documents(self, documents):
<38>      deleted_documents.extend(documents)
<39>      return documents
<40>
<41>  monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)
<42>
<43>  manager = SearchManager(search_info)
<44>
<45>  await manager.remove_content("foo.pdf")
<46>
<47>  assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)"
<48>  assert searched_filters[0] == "sourcefile eq 'foo.pdf'"
<49>  assert len(deleted_documents) == 1, "It should have deleted one document"
===========below chunk 0===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content(monkeypatch, search_info):
  # offset: 1
===========unchanged ref 0===========
  at: _pytest.mark.structures
      MARK_GEN = MarkGenerator(_ispytest=True)
  at: _pytest.monkeypatch
      monkeypatch() -> Generator["MonkeyPatch", None, None]
  at: tests.test_searchmanager
      AsyncSearchResultsIterator(results)
  at: tests.test_searchmanager.AsyncSearchResultsIterator.__init__
      self.results = results
  at: typing.Mapping
      get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
      get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
  # module: tests.test_searchmanager
+ class AsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self

===========changed ref 1===========
  # module: tests.test_searchmanager
+ class AsyncSearchResultsIterator:
+     def __init__(self, results):
+         self.results = results

===========changed ref 2===========
  # module: tests.test_searchmanager
+ class AsyncSearchResultsIterator:
+     def __anext__(self):
+         if len(self.results) == 0:
+             raise StopAsyncIteration
+         return self.results.pop()

===========changed ref 3===========
  # module: tests.test_searchmanager
- class MockClient:
-     def __init__(self, embeddings_client):
-         self.embeddings = embeddings_client

===========changed ref 4===========
  # module: tests.test_searchmanager
- class MockEmbeddingsClient:
-     def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:
-         return self.create_embedding_response

===========changed ref 5===========
  # module: tests.test_searchmanager
- class MockEmbeddingsClient:
-     def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):
-         self.create_embedding_response = create_embedding_response

===========changed ref 6===========
+ # module: app.backend.prepdocslib.strategy

===========changed ref 7===========
+ # module: app.backend.prepdocslib.strategy
+ class Strategy(ABC):
+     def run(self):
+         raise NotImplementedError

===========changed ref 8===========
+ # module: app.backend.prepdocslib.strategy
+ class Strategy(ABC):
+     def setup(self):
+         raise NotImplementedError

===========changed ref 9===========
+ # module: app.backend.prepdocslib.strategy
+ class DocumentAction(Enum):
+     Add = 0
+     Remove = 1
+     RemoveAll = 2

===========changed ref 10===========
+ # module: app.backend.prepdocslib.strategy
+ class SearchInfo:
+     def create_search_indexer_client(self) -> SearchIndexerClient:
+         return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential)

===========changed ref 11===========
+ # module: app.backend.prepdocslib.strategy
+ class SearchInfo:
+     def create_search_index_client(self) -> SearchIndexClient:
+         return SearchIndexClient(endpoint=self.endpoint, credential=self.credential)

===========changed ref 12===========
+ # module: app.backend.prepdocslib.strategy
+ USER_AGENT = "azure-search-chat-demo/1.0.0"

===========changed ref 13===========
+ # module: app.backend.prepdocslib.strategy
+ class SearchInfo:
+     def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):
+         self.endpoint = endpoint
+         self.credential = credential
+         self.index_name = index_name

===========changed ref 14===========
+ # module: app.backend.prepdocslib.strategy
+ class SearchInfo:
+     def create_search_client(self) -> SearchClient:
+         return SearchClient(endpoint=self.endpoint, index_name=self.index_name, credential=self.credential)

===========changed ref 15===========
+ # module: app.backend.prepdocslib.strategy
+ class SearchInfo:
+     """
+     Class representing a connection to a search service
+     To learn more, please visit https://learn.microsoft.com/azure/search/search-what-is-azure-search
+     """

===========changed ref 16===========
+ # module: app.backend.prepdocslib.strategy
+ class Strategy(ABC):
+     """
+     Abstract strategy for ingesting documents into a search service. It has a single setup step to perform any required initialization, and then a run step that actually ingests documents into the search service.
+     """

===========changed ref 17===========
  # module: tests.test_content_file
+ class MockAiohttpClientResponse(aiohttp.ClientResponse):
+     def __init__(self, url, body_bytes, headers=None):
+         self._body = body_bytes
+         self._headers = headers
+         self._cache = {}
+         self.status = 200
+         self.reason = "OK"
+         self._url = url

===========changed ref 18===========
  # module: tests.test_content_file
+ class MockAiohttpClientResponse404(aiohttp.ClientResponse):
+     def __init__(self, url, body_bytes, headers=None):
+         self._body = body_bytes
+         self._headers = headers
+         self._cache = {}
+         self.status = 404
+         self.reason = "Not Found"
+         self._url = url

===========changed ref 19===========
  # module: tests.test_content_file
+ @pytest.mark.asyncio
+ async def test_content_file_useruploaded_notfound(monkeypatch, auth_client, mock_blob_container_client):
+     class MockBlobClient:
+         async def download_blob(self):
+             raise ResourceNotFoundError(MockAiohttpClientResponse404("userdoc.pdf", b""))
+
+     monkeypatch.setattr(
+         azure.storage.blob.aio.ContainerClient, "get_blob_client", lambda *args, **kwargs: MockBlobClient()
+     )
+
+     async def mock_download_file(self):
+         raise ResourceNotFoundError(MockAiohttpClientResponse404("userdoc.pdf", b""))
+
+     monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "download_file", mock_download_file)
+
+     response = await auth_client.get("/content/userdoc.pdf", headers={"Authorization": "Bearer test"})
+     assert response.status_code == 404
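===========example: the __aiter__/__anext__ protocol===========
Both the old inline class and the new module-level AsyncSearchResultsIterator hand-roll Python's async iterator protocol. The same protocol in a self-contained sketch (names invented for illustration):

import asyncio


class AsyncResults:
    def __init__(self, results: list[dict]):
        self.results = list(results)

    def __aiter__(self):
        # An async iterator returns itself from __aiter__ ...
        return self

    async def __anext__(self) -> dict:
        # ... and signals exhaustion by raising StopAsyncIteration
        if not self.results:
            raise StopAsyncIteration
        return self.results.pop()


async def main() -> None:
    async for doc in AsyncResults([{"id": "1"}, {"id": "2"}]):
        print(doc["id"])


asyncio.run(main())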
===========row 5878===========
path: tests.test_searchmanager/test_remove_content_only_oid (Modified)
project: Azure-Samples~azure-search-openai-demo
commit: 853cc726ca9d6e0d10b595024fef4f5fd70826b4 — Optional upload documents feature (#1395)
===========ground truth===========
 <0>: class AsyncSearchResultsIterator:
 <1>:     def __init__(self):
 <2>:         self.results = [
 <3>:             {
 <4>:                 "@search.score": 1,
 <5>:                 "id": "file-foo_pdf-666",
 <6>:                 "content": "test content",
 <7>:                 "category": "test",
 <8>:                 "sourcepage": "foo.pdf#page=1",
 <9>:                 "sourcefile": "foo.pdf",
<10>:                 "oids": [],
<11>:             },
<12>:             {
<13>:                 "@search.score": 1,
<14>:                 "id": "file-foo_pdf-333",
<15>:                 "content": "test content",
<16>:                 "category": "test",
<17>:                 "sourcepage": "foo.pdf#page=1",
<18>:                 "sourcefile": "foo.pdf",
<19>:                 "oids": ["A-USER-ID", "B-USER-ID"],
<20>:             },
<21>:             {
<22>:                 "@search.score": 1,
<23>:                 "id": "file-foo_pdf-222",
<24>:                 "content": "test content",
<25>:                 "category": "test",
<26>:                 "sourcepage": "foo.pdf#page=1",
<27>:                 "sourcefile": "foo.pdf",
<28>:                 "oids": ["A-USER-ID"],
<29>:             },
<30>:         ]
<31>:
<32>:     def __aiter__(self):
<33>:         return self
<34>:
<35>:     async def __anext__(self):
<36>:         if len(self.results) == 0:
<37>:             raise StopAsyncIteration
<38>:         return self.results.pop()
<39>:
<40>:     async def get_count(self):
<41>:         return len(self.results)
<42>:
<43>: search_results = AsyncSearchResultsIterator(
      search_results = AsyncSearchResultsIterator()
<44>:     [
          {
              "@search.score": 1,
              "id": "file-foo_pdf-666",
              "content": "test content",
              "category": "test",
              "sourcepage": "foo.pdf#page=1",
              "sourcefile": "foo.pdf",
              "oids": [],
          },
          {
              "@search.score": 1,
              "id": "file-foo_pdf-333",
              "content": "test content",
              "category": "test",
              "sourcepage": "foo.pdf#page=1
===========main code===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content_only_oid(monkeypatch, search_info):
 <0>  class AsyncSearchResultsIterator:
 <1>      def __init__(self):
 <2>          self.results = [
 <3>              {
 <4>                  "@search.score": 1,
 <5>                  "id": "file-foo_pdf-666",
 <6>                  "content": "test content",
 <7>                  "category": "test",
 <8>                  "sourcepage": "foo.pdf#page=1",
 <9>                  "sourcefile": "foo.pdf",
<10>                  "oids": [],
<11>              },
<12>              {
<13>                  "@search.score": 1,
<14>                  "id": "file-foo_pdf-333",
<15>                  "content": "test content",
<16>                  "category": "test",
<17>                  "sourcepage": "foo.pdf#page=1",
<18>                  "sourcefile": "foo.pdf",
<19>                  "oids": ["A-USER-ID", "B-USER-ID"],
<20>              },
<21>              {
<22>                  "@search.score": 1,
<23>                  "id": "file-foo_pdf-222",
<24>                  "content": "test content",
<25>                  "category": "test",
<26>                  "sourcepage": "foo.pdf#page=1",
<27>                  "sourcefile": "foo.pdf",
<28>                  "oids": ["A-USER-ID"],
<29>              },
<30>          ]
<31>
<32>      def __aiter__(self):
<33>          return self
<34>
<35>      async def __anext__(self):
<36>          if len(self.results) == 0:
<37>              raise StopAsyncIteration
<38>          return self.results.pop()
<39>
<40>      async def get_count(self):
<41>          return len(self.results)
<42>
<43>  search_results = AsyncSearchResultsIterator()
<44>
<45>  searched_filters = []
<46>
<47>  async def mock_search(self, *args, **kwargs):
<48>      self.filter = kwargs.get("filter")
<49>      searched_filters.append(self.filter)
<50>
===========below chunk 0===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content_only_oid(monkeypatch, search_info):
  # offset: 1

      monkeypatch.setattr(SearchClient, "search", mock_search)

      deleted_documents = []

      async def mock_delete_documents(self, documents):
          deleted_documents.extend(documents)
          return documents

      monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)

      manager = SearchManager(
          search_info,
      )

      await manager.remove_content("foo.pdf", only_oid="A-USER-ID")

      assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)"
      assert searched_filters[0] == "sourcefile eq 'foo.pdf'"
      assert len(deleted_documents) == 1, "It should have deleted one document"
      assert deleted_documents[0]["id"] == "file-foo_pdf-222"
===========unchanged ref 0===========
  at: _pytest.mark.structures
      MARK_GEN = MarkGenerator(_ispytest=True)
  at: tests.test_searchmanager
      AsyncSearchResultsIterator(results)
  at: tests.test_searchmanager.test_remove_content_no_docs
      search_results = AsyncSearchResultsIterator([])
  at: typing.Mapping
      get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
      get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
  # module: tests.test_searchmanager
+ class AsyncSearchResultsIterator:
+     def get_count(self):
+         return len(self.results)

===========changed ref 1===========
  # module: tests.test_searchmanager
+ @pytest.mark.asyncio
+ async def test_remove_content_no_docs(monkeypatch, search_info):
+     search_results = AsyncSearchResultsIterator([])
+
+     async def mock_search(self, *args, **kwargs):
+         return search_results
+
+     monkeypatch.setattr(SearchClient, "search", mock_search)
+
+     deleted_calls = []
+
+     async def mock_delete_documents(self, documents):
+         deleted_calls.append(documents)
+         return documents
+
+     monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)
+
+     manager = SearchManager(search_info)
+     await manager.remove_content("foobar.pdf")
+
+     assert len(deleted_calls) == 0, "It should have made zero calls to delete_documents"

===========changed ref 2===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content(monkeypatch, search_info):
-     class AsyncSearchResultsIterator:
-         def __init__(self):
-             self.results = [
-                 {
-                     "@search.score": 1,
-                     "id": "file-foo_pdf-666F6F2E706466-page-0",
-                     "content": "test content",
-                     "category": "test",
-                     "sourcepage": "foo.pdf#page=1",
-                     "sourcefile": "foo.pdf",
-                 }
-             ]
-
-         def __aiter__(self):
-             return self
-
-         async def __anext__(self):
-             if len(self.results) == 0:
-                 raise StopAsyncIteration
-             return self.results.pop()
-
-         async def get_count(self):
-             return len(self.results)
-
+     search_results = AsyncSearchResultsIterator(
-     search_results = AsyncSearchResultsIterator()
+         [
+             {
+                 "@search.score": 1,
+                 "id": "file-foo_pdf-666F6F2E706466-page-0",
+                 "content": "test content",
+                 "category": "test",
+                 "sourcepage": "foo.pdf#page=1",
+                 "sourcefile": "foo.pdf",
+             }
+         ]
+     )

      searched_filters = []

      async def mock_search(self, *args, **kwargs):
          self.filter = kwargs.get("filter")
          searched_filters.append(self.filter)
          return search_results

      monkeypatch.setattr(SearchClient, "search", mock_search)

      deleted_documents = []

      async def mock_delete_documents(self, documents):
          deleted_documents.extend(documents)
          return documents

      monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)

===========changed ref 3===========
  # module: tests.test_searchmanager
  @pytest.mark.asyncio
  async def test_remove_content(monkeypatch, search_info):
  # offset: 1

          return documents

      monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)

      manager = SearchManager(search_info)

      await manager.remove_content("foo.pdf")

      assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)"
      assert searched_filters[0] == "sourcefile eq 'foo.pdf'"
      assert len(deleted_documents) == 1, "It should have deleted one document"
      assert deleted_documents[0]["id"] == "file-foo_pdf-666F6F2E706466-page-0"
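===========example: OData filters asserted by these tests===========
The tests assert on filter strings such as `sourcefile eq 'foo.pdf'` that the manager passes to `SearchClient.search`. A hedged sketch of building such filters follows; the `oids` clause uses standard OData collection syntax for Azure AI Search, but the repo's exact per-user filter may differ from this illustration.

def build_sourcefile_filter(filename: str) -> str:
    return f"sourcefile eq '{filename}'"


def build_oid_filter(filename: str, oid: str) -> str:
    # any() with a range variable matches documents whose oids collection contains oid
    return f"sourcefile eq '{filename}' and oids/any(g: g eq '{oid}')"


assert build_sourcefile_filter("foo.pdf") == "sourcefile eq 'foo.pdf'"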
"},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: config\n CONFIG_AUTH_CLIENT = \"auth_client\"\n \n CONFIG_SEARCH_CLIENT = \"search_client\"\n \n at: core.authentication\n AuthError(error, status_code)\n \n at: error\n error_response(error: Exception, route: str, status_code: int=500)\n \n at: functools\n wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T]\n \n at: logging\n exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None\n \n at: typing\n Callable = _CallableType(collections.abc.Callable, 2)\n \n Dict = _alias(dict, 2, inst=False, name='Dict')\n \n \n===========changed ref 0===========\n + # module: app.backend.prepdocslib.htmlparser\n + \n + \n===========changed ref 1===========\n + # module: app.backend.prepdocslib.page\n + \n + \n===========changed ref 2===========\n + # module: app.backend.prepdocslib.textparser\n + \n + \n===========changed ref 3===========\n + # module: app.backend.prepdocslib.jsonparser\n + \n + \n===========changed ref 4===========\n + # module: app.backend.prepdocslib.strategy\n + \n + \n===========changed ref 5===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __aiter__(self):\n + return self\n + \n===========changed ref 6===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def run(self):\n + raise NotImplementedError\n + \n===========changed ref 7===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def setup(self):\n + raise NotImplementedError\n + \n===========changed ref 8===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __init__(self, results):\n + self.results = results\n + \n===========changed ref 9===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.results)\n + \n===========changed ref 10===========\n # module: tests.mocks\n class MockAsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.data)\n + \n===========changed ref 11===========\n + # module: app.backend.prepdocslib.htmlparser\n + class LocalHTMLParser(Parser):\n + \"\"\"Parses HTML text into Page objects.\"\"\"\n + \n===========changed ref 12===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n + return self.create_embedding_response\n + \n===========changed ref 13===========\n # module: tests.mocks\n class MockBlob:\n + def readinto(self, buffer: BytesIO):\n + buffer.write(b\"test\")\n + \n===========changed ref 14===========\n + # module: app.backend.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 15===========\n + # module: app.backend.prepdocslib.textparser\n + class TextParser(Parser):\n + \"\"\"Parses simple text into a Page object.\"\"\"\n + \n===========changed ref 16===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n - return self.create_embedding_response\n - \n===========changed ref 17===========\n # module: tests.mocks\n + class MockClient:\n + def __init__(self, embeddings_client):\n + self.embeddings = embeddings_client\n + \n===========changed ref 18===========\n # module: tests.test_searchmanager\n 
- class MockClient:\n - def __init__(self, embeddings_client):\n - self.embeddings = embeddings_client\n - \n===========changed ref 19===========\n + # module: app.backend.prepdocslib.strategy\n + class DocumentAction(Enum):\n + Add = 0\n + Remove = 1\n + RemoveAll = 2\n + \n===========changed ref 20===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n + self.create_embedding_response = create_embedding_response\n + \n===========changed ref 21===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n - self.create_embedding_response = create_embedding_response\n - \n===========changed ref 22===========\n + # module: app.backend.prepdocslib.page\n + class SplitPage:\n + def __init__(self, page_num: int, text: str):\n + self.page_num = page_num\n + self.text = text\n + \n===========changed ref 23===========\n + # module: app.backend.prepdocslib.strategy\n + class SearchInfo:\n + def create_search_indexer_client(self) -> SearchIndexerClient:\n + return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential)\n + \n===========changed ref 24===========\n + # module: app.backend.prepdocslib.strategy\n + class SearchInfo:\n + def create_search_index_client(self) -> SearchIndexClient:\n + return SearchIndexClient(endpoint=self.endpoint, credential=self.credential)\n + \n===========changed ref 25===========\n + # module: app.backend.prepdocslib.strategy\n + USER_AGENT = \"azure-search-chat-demo/1.0.0\"\n + \n===========changed ref 26===========\n + # module: app.backend.prepdocslib.page\n + class SplitPage:\n + \"\"\"\n + A section of a page that has been split into a smaller chunk.\n + \"\"\"\n + \n===========changed ref 27===========\n + # module: app.backend.prepdocslib.page\n + class Page:\n + def __init__(self, page_num: int, offset: int, text: str):\n + self.page_num = page_num\n + self.offset = offset\n + self.text = text\n + \n===========changed ref 28===========\n + # module: app.backend.prepdocslib.strategy\n + class SearchInfo:\n + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):\n + self.endpoint = endpoint\n + self.credential = credential\n + self.index_name = index_name\n + \n===========changed ref 29===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __anext__(self):\n + if len(self.results) == 0:\n + raise StopAsyncIteration\n + return self.results.pop()\n + \n===========changed ref 30===========\n + # module: app.backend.prepdocslib.strategy\n + class SearchInfo:\n + def create_search_client(self) -> SearchClient:\n + return SearchClient(endpoint=self.endpoint, index_name=self.index_name, credential=self.credential)\n + \n===========changed ref 31===========\n + # module: app.backend.prepdocslib.jsonparser\n + class JsonParser(Parser):\n + \"\"\"\n + Concrete parser that can parse JSON into Page objects. 
===========rowIdx 5880===========
 path: scripts.auth_update/main
 type: Modified
 project: Azure-Samples~azure-search-openai-demo
 commit_hash: 853cc726ca9d6e0d10b595024fef4f5fd70826b4
 commit_message: Optional upload documents feature (#1395)
 ground_truth:
 <19>: "http://localhost:5173/redirect",
 main_code:
  # module: scripts.auth_update
  def main():
 <0>     if not test_authentication_enabled():
 <1>         print("Not updating authentication.")
 <2>         exit(0)
 <3>
 <4>     credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID")))
 <5>     auth_headers = await get_auth_headers(credential)
 <6>
 <7>     uri = os.getenv("BACKEND_URI")
 <8>     client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None)
 <9>     if client_app_id:
 <10>        client_object_id = await get_application(auth_headers, client_app_id)
 <11>        if client_object_id:
 <12>            print(f"Updating redirect URIs for client app ID {client_app_id}...")
 <13>            # Redirect URIs need to be relative to the deployed application
 <14>            payload = {
 <15>                "publicClient": {"redirectUris": []},
 <16>                "spa": {
 <17>                    "redirectUris": [
 <18>                        "http://localhost:50505/redirect",
 <19>                        f"{uri}/redirect",
 <20>                    ]
 <21>                },
 <22>                "web": {
 <23>                    "redirectUris": [
 <24>                        f"{uri}/.auth/login/aad/callback",
 <25>                    ]
 <26>                },
 <27>            }
 <28>            await update_application(auth_headers, client_object_id, payload)
 <29>            print(f"Application update for client app id {client_app_id} complete.")
 <30>
 context:
===========unchanged ref 0===========
 at: auth_common
     get_auth_headers(credential: AsyncTokenCredential)
     get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]
     update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object)
     test_authentication_enabled()
 at: os
     getenv(key: str, default: _T) -> Union[str, _T]
     getenv(key: str) -> Optional[str]
===========changed ref 0===========
 + # module: app.backend.prepdocslib.blobmanager
===========changed ref 1===========
 + # module: tests.test_upload
===========changed ref 2===========
 + # module: app.backend.prepdocslib.listfilestrategy
===========changed ref 3===========
 + # module: app.backend.prepdocslib.textsplitter
===========changed ref 4===========
 + # module: app.backend.prepdocslib.parser
===========changed ref 5===========
 + # module: app.backend.prepdocslib.pdfparser
===========changed refs 6-19===========
 repeats of the blocks listed under the previous row's context (prepdocslib module markers; AsyncSearchResultsIterator; Strategy.run/setup; MockAsyncSearchResultsIterator, LocalHTMLParser, MockEmbeddingsClient, MockBlob)
===========changed ref 20===========
 + # module: app.backend.prepdocs
 + logger = logging.getLogger("ingester")
===========changed ref 21===========
 + # module: app.backend.prepdocslib.blobmanager
 + class BlobManager:
 +     @classmethod
 +     def blob_name_from_file_name(cls, filename) -> str:
 +         return os.path.basename(filename)
===========changed ref 22===========
 + # module: app.backend.prepdocslib.blobmanager
 + logger = logging.getLogger("ingester")
===========changed ref 23===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class LocalListFileStrategy(ListFileStrategy):
 +     def __init__(self, path_pattern: str):
 +         self.path_pattern = path_pattern
===========changed ref 24===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + logger = logging.getLogger("ingester")
===========changed ref 25===========
 + # module: app.backend.prepdocslib.textsplitter
 + logger = logging.getLogger("ingester")
===========changed ref 26===========
 + # module: app.backend.prepdocslib.pdfparser
 + logger = logging.getLogger("ingester")
===========changed refs 27-31===========
 repeats of the htmlparser logger, TextParser and test-mock move blocks listed under the previous row's context
===========changed ref 32===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class File:
 +     def close(self):
 +         if self.content:
 +             self.content.close()
===========changed ref 33===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class File:
 +     def filename(self):
 +         return os.path.basename(self.content.name)
===========changed ref 34===========
 + # module: app.backend.prepdocslib.textsplitter
 + class SimpleTextSplitter(TextSplitter):
 +     def __init__(self, max_object_length: int = 1000):
 +         self.max_object_length = max_object_length
===========changed refs 35-36, 38===========
 repeats of the DocumentAction and MockEmbeddingsClient blocks listed under the previous row's context
===========changed ref 37===========
 + # module: app.backend.prepdocslib.parser
 + class Parser(ABC):
 +     """
 +     Abstract parser that parses content into Page objects
 +     """
===========changed ref 39===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class File:
 +     def __init__(self, content: IO, acls: Optional[dict[str, list]] = None):
 +         self.content = content
 +         self.acls = acls or {}
===========changed ref 40===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class File:
 +     def file_extension(self):
 +         return os.path.splitext(self.content.name)[1]
===========changed refs 41-44===========
 repeats of the SplitPage, SearchInfo client-factory and USER_AGENT blocks listed under the previous row's context
===========changed ref 45===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class LocalListFileStrategy(ListFileStrategy):
 +     """
 +     Concrete strategy for listing files that are located in a local filesystem
 +     """
===========changed ref 46===========
 + # module: app.backend.prepdocs
 + def main(strategy: Strategy, setup_index: bool = True):
 +     if setup_index:
 +         await strategy.setup()
 +
 +     await strategy.run()
===========changed ref 47===========
 + # module: app.backend.prepdocslib.listfilestrategy
 + class LocalListFileStrategy(ListFileStrategy):
 +     def list_paths(self) -> AsyncGenerator[str, None]:
 +         async for p in self._list_paths(self.path_pattern):
 +             yield p
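The ground_truth edit in rowIdx 5880 adds the Vite dev server's redirect URI (port 5173) alongside the existing local and deployed entries. A small runnable sketch of how that payload is assembled; build_redirect_payload and the sample backend URI are illustrative names, and only the URI strings come from the row itself.

import json

def build_redirect_payload(backend_uri: str) -> dict:
    return {
        "publicClient": {"redirectUris": []},
        "spa": {
            "redirectUris": [
                "http://localhost:50505/redirect",   # local Quart server
                "http://localhost:5173/redirect",    # local Vite dev server (the new entry)
                f"{backend_uri}/redirect",           # deployed application
            ]
        },
        "web": {"redirectUris": [f"{backend_uri}/.auth/login/aad/callback"]},
    }

print(json.dumps(build_redirect_payload("https://contoso.azurewebsites.net"), indent=2))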
===========rowIdx 5881===========
 path: app.backend.app/content_file
 type: Modified
 project: Azure-Samples~azure-search-openai-demo
 commit_hash: 853cc726ca9d6e0d10b595024fef4f5fd70826b4
 commit_message: Optional upload documents feature (#1395)
 ground_truth:
 <13>: blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
       blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
 <14>: blob: Union[BlobDownloader, DatalakeDownloader]
 <17>: logging.info("Path not found in general Blob container: %s", path)
       if current_app.config[CONFIG_USER_UPLOAD_ENABLED]:
           try:
               user_oid = auth_claims["oid"]
               user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
               user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid)
               file_client = user_directory_client.get_file_client(path)
               blob = await file_client.download_file()
           except ResourceNotFoundError:
               logging.exception("Path not found in DataLake: %s", path)
       logging.exception("Path not found: %s", path)
 <18>: abort(404)
       else:
           abort(
 main_code:
  # module: app.backend.app
  @bp.route("/content/<path>")
  @authenticated_path
  + async def content_file(path: str, auth_claims: Dict[str, Any]):
  - async def content_file(path: str):
 <0>     """
 <1>     Serve content files from blob storage from within the app to keep the example self-contained.
 <2>     *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in
 <3>     if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control
 <4>     if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to
 <5>     This is also slow and memory hungry.
 <6>     """
 <7>     # Remove page number from path, filename-1.txt -> filename.txt
 <8>     # This shouldn't typically be necessary as browsers don't send hash fragments to servers
 <9>     if path.find("#page=") > 0:
 <10>        path_parts = path.rsplit("#page=", 1)
 <11>        path = path_parts[0]
 <12>    logging.info("Opening file %s", path)
 <13>    blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
 <14>    try:
 <15>        blob = await blob_container_client.get_blob_client(path).download_blob()
 <16>    except ResourceNotFoundError:
 <17>        logging.exception("Path not found: %s", path)
 <18>        abort(404)
 <19>    if not blob.properties or not blob.properties.has_key("content_settings"):
 <20>        abort(404)
 <21>    mime_type = blob.properties["content_settings"]["content_type"]
 <22>    if mime_type == "application/octet-stream":
 <23>        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
 <24>    blob_file = io.BytesIO()
 <25>    await blob.readinto(blob_file)
 <26>    blob_file.seek(0)
 <27>    return await send_file(blob_file
===========below chunk 0===========
  # module: app.backend.app
  @bp.route("/content/<path>")
  @authenticated_path
  + async def content_file(path: str, auth_claims: Dict[str, Any]):
  - async def content_file(path: str):
  # offset: 1
 context:
===========unchanged ref 0===========
 at: app.backend.app
     bp = Blueprint("routes", __name__, static_folder="static")
 at: config
     CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
 at: decorators
     authenticated_path(route_fn: Callable[[str, Dict[str, Any]], Any])
 at: io
     BytesIO(initial_bytes: bytes=...)
 at: io.BytesIO
     seek(self, offset: int, whence: int=..., /) -> int
 at: logging
     exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
     info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
 at: mimetypes
     guess_type(url: Union[Text, PathLike[str]], strict: bool=...) -> Tuple[Optional[str], Optional[str]]
===========changed ref 0===========
 + # module: app.backend.prepdocslib.integratedvectorizerstrategy
===========changed ref 1===========
 + # module: app.backend.prepdocslib.filestrategy
===========changed refs 2-21===========
 repeats of the prepdocslib module markers and test-mock blocks listed under rowIdx 5880's context above
===========changed ref 22===========
 + # module: app.backend.prepdocslib.integratedvectorizerstrategy
 + logger = logging.getLogger("ingester")
===========changed ref 23===========
 + # module: app.backend.prepdocslib.filestrategy
 + logger = logging.getLogger("ingester")
===========changed refs 24-45===========
 repeats of the prepdocs, blobmanager, listfilestrategy, textsplitter, parser, DocumentAction, File and SplitPage blocks listed under rowIdx 5880's context above
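RowIdx 5881's ground_truth turns the single container lookup into a two-tier one: the shared blob container first, then, when user upload is enabled, the caller's own Data Lake directory keyed by the "oid" claim. A dict-backed sketch of just that control flow; the stores and the fetch function are stand-ins for the azure-storage ContainerClient/FileSystemClient calls the real route makes.

from typing import Optional

SHARED = {"benefits.pdf": b"shared doc"}                  # general Blob container
USER_DIRS = {"user-123": {"notes.txt": b"private doc"}}   # per-user Data Lake directories

def fetch(path: str, user_oid: str, user_upload_enabled: bool) -> Optional[bytes]:
    if path in SHARED:
        return SHARED[path]
    if user_upload_enabled:
        # Fall back to the caller's own directory, keyed by the oid claim.
        return USER_DIRS.get(user_oid, {}).get(path)
    return None  # maps to abort(404) in the route

assert fetch("benefits.pdf", "user-123", True) == b"shared doc"
assert fetch("notes.txt", "user-123", True) == b"private doc"
assert fetch("notes.txt", "user-123", False) is None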
===========rowIdx 5882===========
 path: app.backend.app/config
 type: Modified
 project: Azure-Samples~azure-search-openai-demo
 commit_hash: 853cc726ca9d6e0d10b595024fef4f5fd70826b4
 commit_message: Optional upload documents feature (#1395)
 ground_truth:
 <5>: "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED],
 main_code:
  # module: app.backend.app
  @bp.route("/config", methods=["GET"])
  def config():
 <0>     return jsonify(
 <1>         {
 <2>             "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED],
 <3>             "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED],
 <4>             "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED],
 <5>         }
 <6>     )
 <7>
 context:
===========unchanged ref 0===========
 at: app.backend.app
     bp = Blueprint("routes", __name__, static_folder="static")
 at: config
     CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed"
     CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed"
     CONFIG_VECTOR_SEARCH_ENABLED = "vector_search_enabled"
===========changed ref 0===========
 # module: app.backend.app
 @bp.route("/content/<path>")
 @authenticated_path
 + async def content_file(path: str, auth_claims: Dict[str, Any]):
 - async def content_file(path: str):
     """
     Serve content files from blob storage from within the app to keep the example self-contained.
     *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in
     if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control
     if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to
     This is also slow and memory hungry.
     """
     # Remove page number from path, filename-1.txt -> filename.txt
     # This shouldn't typically be necessary as browsers don't send hash fragments to servers
     if path.find("#page=") > 0:
         path_parts = path.rsplit("#page=", 1)
         path = path_parts[0]
     logging.info("Opening file %s", path)
 +   blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
 -   blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
 +   blob: Union[BlobDownloader, DatalakeDownloader]
     try:
         blob = await blob_container_client.get_blob_client(path).download_blob()
     except ResourceNotFoundError:
 +       logging.info("Path not found in general Blob container: %s", path)
 +       if current_app.config[CONFIG_USER_UPLOAD_ENABLED]:
 +           try:
 +               user_oid = auth_claims["oid"]
 +               user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
 +               user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid)
 +               file_client = user_directory_client.get_file_client(path)
===========changed ref 1===========
 # module: app.backend.app
 @bp.route("/content/<path>")
 @authenticated_path
 + async def content_file(path: str, auth_claims: Dict[str, Any]):
 - async def content_file(path: str):
 # offset: 1
 +               blob = await file_client.download_file()
 +           except ResourceNotFoundError:
 +               logging.exception("Path not found in DataLake: %s", path)
 -       logging.exception("Path not found: %s", path)
 +               abort(404)
 +       else:
 +           abort(404)
 -       abort(404)
     if not blob.properties or not blob.properties.has_key("content_settings"):
         abort(404)
     mime_type = blob.properties["content_settings"]["content_type"]
     if mime_type == "application/octet-stream":
         mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
     blob_file = io.BytesIO()
     await blob.readinto(blob_file)
     blob_file.seek(0)
     return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
===========changed refs 2-36===========
 repeats of the prepdocslib module, logger and test-mock blocks listed under the contexts of rowIdx 5880 and 5881 above
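The /config route only surfaces booleans the frontend uses to toggle UI, and rowIdx 5882's ground_truth adds one more, showUserUpload. The same mapping as a stand-alone runnable function, with app_config and its sample values standing in for current_app.config.

app_config = {
    "gpt4v_deployed": False,
    "semantic_ranker_deployed": True,
    "vector_search_enabled": True,
    "user_upload_enabled": True,   # the new flag behind "showUserUpload"
}

def config_response(cfg: dict) -> dict:
    # Mirror the jsonify payload shape from the route above.
    return {
        "showGPT4VOptions": cfg["gpt4v_deployed"],
        "showSemanticRankerOption": cfg["semantic_ranker_deployed"],
        "showVectorOption": cfg["vector_search_enabled"],
        "showUserUpload": cfg["user_upload_enabled"],
    }

assert config_response(app_config)["showUserUpload"] is True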
===========rowIdx 5883===========
 path: app.backend.app/close_clients
 type: Modified
 project: Azure-Samples~azure-search-openai-demo
 commit_hash: 853cc726ca9d6e0d10b595024fef4f5fd70826b4
 commit_message: Optional upload documents feature (#1395)
 ground_truth:
 <2>: if current_app.config.get(CONFIG_USER_BLOB_CONTAINER_CLIENT):
          await current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT].close()
 main_code:
  # module: app.backend.app
  @bp.after_app_serving
  async def close_clients():
 <0>     await current_app.config[CONFIG_SEARCH_CLIENT].close()
 <1>     await current_app.config[CONFIG_BLOB_CONTAINER_CLIENT].close()
 <2>
 context:
===========unchanged ref 0===========
 at: app.backend.app
     bp = Blueprint("routes", __name__, static_folder="static")
 at: config
     CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
     CONFIG_SEARCH_CLIENT = "search_client"
===========changed ref 0===========
 # module: app.backend.app
 + @bp.get("/list_uploaded")
 + @authenticated
 + async def list_uploaded(auth_claims: dict[str, Any]):
 +     user_oid = auth_claims["oid"]
 +     user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
 +     files = []
 +     try:
 +         all_paths = user_blob_container_client.get_paths(path=user_oid)
 +         async for path in all_paths:
 +             files.append(path.name.split("/", 1)[1])
 +     except ResourceNotFoundError as error:
 +         if error.status_code != 404:
 +             current_app.logger.exception("Error listing uploaded files", error)
 +     return jsonify(files), 200
===========changed ref 1===========
 # module: app.backend.app
 + @bp.post("/delete_uploaded")
 + @authenticated
 + async def delete_uploaded(auth_claims: dict[str, Any]):
 +     request_json = await request.get_json()
 +     filename = request_json.get("filename")
 +     user_oid = auth_claims["oid"]
 +     user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
 +     user_directory_client = user_blob_container_client.get_directory_client(user_oid)
 +     file_client = user_directory_client.get_file_client(filename)
 +     await file_client.delete_file()
 +     ingester = current_app.config[CONFIG_INGESTER]
 +     await ingester.remove_file(filename, user_oid)
 +     return jsonify({"message": f"File {filename} deleted successfully"}), 200
===========changed ref 2===========
 # module: app.backend.app
 @bp.route("/config", methods=["GET"])
 def config():
     return jsonify(
         {
             "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED],
             "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED],
             "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED],
 +           "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED],
         }
     )
===========changed ref 3===========
 # module: app.backend.app
 + @bp.post("/upload")
 + @authenticated
 + async def upload(auth_claims: dict[str, Any]):
 +     request_files = await request.files
 +     if "file" not in request_files:
 +         # If no files were included in the request, return an error response
 +         return jsonify({"message": "No file part in the request", "status": "failed"}), 400
 +
 +     user_oid = auth_claims["oid"]
 +     file = request_files.getlist("file")[0]
 +     user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
 +     user_directory_client = user_blob_container_client.get_directory_client(user_oid)
 +     try:
 +         await user_directory_client.get_directory_properties()
 +     except ResourceNotFoundError:
 +         current_app.logger.info("Creating directory for user %s", user_oid)
 +         await user_directory_client.create_directory()
 +         await user_directory_client.set_access_control(owner=user_oid)
 +     file_client = user_directory_client.get_file_client(file.filename)
 +     file_io = file
 +     file_io.name = file.filename
 +     file_io = io.BufferedReader(file_io)
 +     await file_client.upload_data(file_io, overwrite=True, metadata={"UploadedBy": user_oid})
 +     file_io.seek(0)
 +     ingester = current_app.config[CONFIG_INGESTER]
 +     await ingester.add_file(File(content=file_io, acls={"oids": [user_oid]}))
 +     return jsonify({"message": "File uploaded successfully"}), 200
===========changed refs 4-5===========
 the content_file diff shown in full under rowIdx 5882's context above
===========changed refs 6-8===========
 repeats of the integratedvectorizerstrategy, filestrategy and blobmanager module markers listed under earlier contexts
===========rowIdx 5884===========
 path: tests.test_authenticationhelper/test_auth_setup
 type: Modified
 project: Azure-Samples~azure-search-openai-demo
 commit_hash: 853cc726ca9d6e0d10b595024fef4f5fd70826b4
 commit_message: Optional upload documents feature (#1395)
 ground_truth:
 <1>: result = helper.get_auth_setup_for_client()
 <2>-<20>: (the inline expected-dict assertion on main_code lines <1>-<20> below is removed)
 <21>: snapshot.assert_match(json.dumps(result, indent=4), "result.json")
 main_code:
  # module: tests.test_authenticationhelper
  + def test_auth_setup(mock_confidential_client_success, mock_validate_token_success, snapshot):
  - def test_auth_setup(mock_confidential_client_success, mock_validate_token_success):
 <0>     helper = create_authentication_helper()
 <1>     assert helper.get_auth_setup_for_client() == {
 <2>         "useLogin": True,
 <3>         "requireAccessControl": False,
 <4>         "msalConfig": {
 <5>             "auth": {
 <6>                 "clientId": "CLIENT_APP",
 <7>                 "authority": "https://login.microsoftonline.com/TENANT_ID",
 <8>                 "redirectUri": "/redirect",
 <9>                 "postLogoutRedirectUri": "/",
 <10>                "navigateToLoginRequestUrl": False,
 <11>            },
 <12>            "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False},
 <13>        },
 <14>        "loginRequest": {
 <15>            "scopes": [".default"],
 <16>        },
 <17>        "tokenRequest": {
 <18>            "scopes": ["api://SERVER_APP/access_as_user"],
 <19>        },
 <20>    }
 <21>
 context:
===========unchanged ref 0===========
 at: tests.conftest
     mock_confidential_client_success(monkeypatch)
 at: tests.test_authenticationhelper
     create_authentication_helper(require_access_control: bool=False)
===========changed ref 0===========
 + # module: app.backend.prepdocslib.fileprocessor
===========changed refs 1-22===========
 repeats of the prepdocslib module markers and test-mock blocks listed under earlier rows' contexts
===========changed ref 23===========
 + # module: app.backend.prepdocslib.fileprocessor
 + @dataclass(frozen=True)
 + class FileProcessor:
 +     parser: Parser
 +     splitter: TextSplitter
===========changed refs 24-51===========
 repeats of the logger, BlobManager, File, LocalListFileStrategy, SimpleTextSplitter, DocumentAction, Parser, SplitPage, SearchInfo and USER_AGENT blocks listed under earlier rows' contexts
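Rows 5884 and 5885 trade a large inline dict assertion for pytest-snapshot's snapshot.assert_match, which compares serialized output against a stored file. A dependency-free sketch of the same record-then-compare idea; assert_matches_snapshot is a hypothetical helper written for illustration, not pytest-snapshot's actual API.

import json, pathlib, tempfile

def assert_matches_snapshot(value: dict, snapshot_path: pathlib.Path, update: bool = False):
    rendered = json.dumps(value, indent=4, sort_keys=True)
    if update or not snapshot_path.exists():
        snapshot_path.write_text(rendered)  # record the snapshot on first run
    assert rendered == snapshot_path.read_text(), f"snapshot mismatch: {snapshot_path}"

with tempfile.TemporaryDirectory() as d:
    snap = pathlib.Path(d) / "result.json"
    result = {"useLogin": True, "requireAccessControl": False}
    assert_matches_snapshot(result, snap)  # first run writes the snapshot
    assert_matches_snapshot(result, snap)  # later runs compare against it

The payoff in the tests above is the same as here: when the expected auth-setup payload changes, the snapshot file is regenerated instead of hand-editing a twenty-line dict in the test body.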
"},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: tests.test_authenticationhelper\n create_authentication_helper(require_access_control: bool=False)\n \n \n===========changed ref 0===========\n # module: tests.test_authenticationhelper\n + def test_auth_setup(mock_confidential_client_success, mock_validate_token_success, snapshot):\n - def test_auth_setup(mock_confidential_client_success, mock_validate_token_success):\n helper = create_authentication_helper()\n + result = helper.get_auth_setup_for_client()\n - assert helper.get_auth_setup_for_client() == {\n - \"useLogin\": True,\n - \"requireAccessControl\": False,\n - \"msalConfig\": {\n - \"auth\": {\n - \"clientId\": \"CLIENT_APP\",\n - \"authority\": \"https://login.microsoftonline.com/TENANT_ID\",\n - \"redirectUri\": \"/redirect\",\n - \"postLogoutRedirectUri\": \"/\",\n - \"navigateToLoginRequestUrl\": False,\n - },\n - \"cache\": {\"cacheLocation\": \"localStorage\", \"storeAuthStateInCookie\": False},\n - },\n - \"loginRequest\": {\n - \"scopes\": [\".default\"],\n - },\n - \"tokenRequest\": {\n - \"scopes\": [\"api://SERVER_APP/access_as_user\"],\n - },\n - }\n + snapshot.assert_match(json.dumps(result, indent=4), \"result.json\")\n \n===========changed ref 1===========\n + # module: app.backend.prepdocslib.fileprocessor\n + \n + \n===========changed ref 2===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + \n + \n===========changed ref 3===========\n + # module: app.backend.prepdocslib.filestrategy\n + \n + \n===========changed ref 4===========\n + # module: app.backend.prepdocslib.blobmanager\n + \n + \n===========changed ref 5===========\n + # module: tests.test_upload\n + \n + \n===========changed ref 6===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + \n + \n===========changed ref 7===========\n + # module: app.backend.prepdocslib.textsplitter\n + \n + \n===========changed ref 8===========\n + # module: app.backend.prepdocslib.parser\n + \n + \n===========changed ref 9===========\n + # module: app.backend.prepdocslib.pdfparser\n + \n + \n===========changed ref 10===========\n + # module: app.backend.prepdocslib.htmlparser\n + \n + \n===========changed ref 11===========\n + # module: app.backend.prepdocslib.page\n + \n + \n===========changed ref 12===========\n + # module: app.backend.prepdocslib.textparser\n + \n + \n===========changed ref 13===========\n + # module: app.backend.prepdocslib.jsonparser\n + \n + \n===========changed ref 14===========\n + # module: app.backend.prepdocslib.strategy\n + \n + \n===========changed ref 15===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __aiter__(self):\n + return self\n + \n===========changed ref 16===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def run(self):\n + raise NotImplementedError\n + \n===========changed ref 17===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def setup(self):\n + raise NotImplementedError\n + \n===========changed ref 18===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __init__(self, results):\n + self.results = results\n + \n===========changed ref 19===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.results)\n + \n===========changed ref 20===========\n # module: tests.mocks\n class MockAsyncSearchResultsIterator:\n + def 
get_count(self):\n + return len(self.data)\n + \n===========changed ref 21===========\n + # module: app.backend.prepdocslib.htmlparser\n + class LocalHTMLParser(Parser):\n + \"\"\"Parses HTML text into Page objects.\"\"\"\n + \n===========changed ref 22===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n + return self.create_embedding_response\n + \n===========changed ref 23===========\n # module: tests.mocks\n class MockBlob:\n + def readinto(self, buffer: BytesIO):\n + buffer.write(b\"test\")\n + \n===========changed ref 24===========\n + # module: app.backend.prepdocslib.fileprocessor\n + @dataclass(frozen=True)\n + class FileProcessor:\n + parser: Parser\n + splitter: TextSplitter\n + \n===========changed ref 25===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 26===========\n + # module: app.backend.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 27===========\n + # module: app.backend.prepdocs\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 28===========\n + # module: app.backend.prepdocslib.blobmanager\n + class BlobManager:\n + @classmethod\n + def blob_name_from_file_name(cls, filename) -> str:\n + return os.path.basename(filename)\n + \n===========changed ref 29===========\n + # module: app.backend.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 30===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n + self.path_pattern = path_pattern\n + \n===========changed ref 31===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 32===========\n + # module: app.backend.prepdocslib.textsplitter\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 33===========\n + # module: app.backend.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 34===========\n + # module: app.backend.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 35===========\n + # module: app.backend.prepdocslib.textparser\n + class TextParser(Parser):\n + \"\"\"Parses simple text into a Page object.\"\"\"\n + \n===========changed ref 36===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n - return self.create_embedding_response\n - \n===========changed ref 37===========\n # module: tests.mocks\n + class MockClient:\n + def __init__(self, embeddings_client):\n + self.embeddings = embeddings_client\n + \n===========changed ref 38===========\n # module: tests.test_searchmanager\n - class MockClient:\n - def __init__(self, embeddings_client):\n - self.embeddings = embeddings_client\n - \n===========changed ref 39===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def close(self):\n + if self.content:\n + self.content.close()\n + \n===========changed ref 40===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def filename(self):\n + return os.path.basename(self.content.name)\n + \n===========changed ref 41===========\n + # module: 
app.backend.prepdocslib.textsplitter\n + class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n + self.max_object_length = max_object_length\n + \n===========changed ref 42===========\n + # module: app.backend.prepdocslib.strategy\n + class DocumentAction(Enum):\n + Add = 0\n + Remove = 1\n + RemoveAll = 2\n + \n===========changed ref 43===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n + self.create_embedding_response = create_embedding_response\n + \n===========changed ref 44===========\n + # module: app.backend.prepdocslib.parser\n + class Parser(ABC):\n + \"\"\"\n + Abstract parser that parses content into Page objects\n + \"\"\"\n + \n===========changed ref 45===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n - self.create_embedding_response = create_embedding_response\n - \n===========changed ref 46===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None):\n + self.content = content\n + self.acls = acls or {}\n + "}}},{"rowIdx":5886,"cells":{"path":{"kind":"string","value":"tests.conftest/mock_acs_search"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"853cc726ca9d6e0d10b595024fef4f5fd70826b4"},"commit_message":{"kind":"string","value":"Optional upload documents feature (#1395)"},"ground_truth":{"kind":"string","value":" <0>: monkeypatch.setattr(SearchClient, \"search\", mock_search)\n"},"main_code":{"kind":"string","value":" # module: tests.conftest\n @pytest.fixture\n def mock_acs_search(monkeypatch):\n <0> monkeypatch.setattr(SearchClient, \"search\", mock_search)\n <1> monkeypatch.setattr(SearchClient, \"search\", mock_search)\n <2> \n <3> async def mock_get_index(*args, **kwargs):\n <4> return MockSearchIndex\n <5> \n <6> monkeypatch.setattr(SearchIndexClient, \"get_index\", mock_get_index)\n <7> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n + # module: app.backend.prepdocslib.embeddings\n + \n + \n===========changed ref 1===========\n + # module: app.backend.prepdocslib.fileprocessor\n + \n + \n===========changed ref 2===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + \n + \n===========changed ref 3===========\n + # module: app.backend.prepdocslib.filestrategy\n + \n + \n===========changed ref 4===========\n + # module: app.backend.prepdocslib.blobmanager\n + \n + \n===========changed ref 5===========\n + # module: tests.test_upload\n + \n + \n===========changed ref 6===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + \n + \n===========changed ref 7===========\n + # module: app.backend.prepdocslib.textsplitter\n + \n + \n===========changed ref 8===========\n + # module: app.backend.prepdocslib.parser\n + \n + \n===========changed ref 9===========\n + # module: app.backend.prepdocslib.pdfparser\n + \n + \n===========changed ref 10===========\n + # module: app.backend.prepdocslib.htmlparser\n + \n + \n===========changed ref 11===========\n + # module: app.backend.prepdocslib.page\n + \n + \n===========changed ref 12===========\n + # module: app.backend.prepdocslib.textparser\n + \n + \n===========changed ref 
13===========\n + # module: app.backend.prepdocslib.jsonparser\n + \n + \n===========changed ref 14===========\n + # module: app.backend.prepdocslib.strategy\n + \n + \n===========changed ref 15===========\n + # module: app.backend.prepdocslib.embeddings\n + class OpenAIEmbeddings(ABC):\n + def create_client(self) -> AsyncOpenAI:\n + raise NotImplementedError\n + \n===========changed ref 16===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __aiter__(self):\n + return self\n + \n===========changed ref 17===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def run(self):\n + raise NotImplementedError\n + \n===========changed ref 18===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def setup(self):\n + raise NotImplementedError\n + \n===========changed ref 19===========\n + # module: app.backend.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 20===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __init__(self, results):\n + self.results = results\n + \n===========changed ref 21===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.results)\n + \n===========changed ref 22===========\n # module: tests.mocks\n class MockAsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.data)\n + \n===========changed ref 23===========\n + # module: app.backend.prepdocslib.htmlparser\n + class LocalHTMLParser(Parser):\n + \"\"\"Parses HTML text into Page objects.\"\"\"\n + \n===========changed ref 24===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n + return self.create_embedding_response\n + \n===========changed ref 25===========\n # module: tests.mocks\n class MockBlob:\n + def readinto(self, buffer: BytesIO):\n + buffer.write(b\"test\")\n + \n===========changed ref 26===========\n + # module: app.backend.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 27===========\n + # module: app.backend.prepdocslib.fileprocessor\n + @dataclass(frozen=True)\n + class FileProcessor:\n + parser: Parser\n + splitter: TextSplitter\n + \n===========changed ref 28===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 29===========\n + # module: app.backend.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 30===========\n + # module: app.backend.prepdocs\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 31===========\n + # module: app.backend.prepdocslib.blobmanager\n + class BlobManager:\n + @classmethod\n + def blob_name_from_file_name(cls, filename) -> str:\n + return os.path.basename(filename)\n + \n===========changed ref 32===========\n + # module: app.backend.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 33===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n + self.path_pattern = path_pattern\n + \n===========changed ref 34===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + logger = 
logging.getLogger(\"ingester\")\n + \n===========changed ref 35===========\n + # module: app.backend.prepdocslib.textsplitter\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 36===========\n + # module: app.backend.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 37===========\n + # module: app.backend.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 38===========\n + # module: app.backend.prepdocslib.textparser\n + class TextParser(Parser):\n + \"\"\"Parses simple text into a Page object.\"\"\"\n + \n===========changed ref 39===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n - return self.create_embedding_response\n - \n===========changed ref 40===========\n # module: tests.mocks\n + class MockClient:\n + def __init__(self, embeddings_client):\n + self.embeddings = embeddings_client\n + \n===========changed ref 41===========\n # module: tests.test_searchmanager\n - class MockClient:\n - def __init__(self, embeddings_client):\n - self.embeddings = embeddings_client\n - \n===========changed ref 42===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def close(self):\n + if self.content:\n + self.content.close()\n + \n===========changed ref 43===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def filename(self):\n + return os.path.basename(self.content.name)\n + \n===========changed ref 44===========\n + # module: app.backend.prepdocslib.textsplitter\n + class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n + self.max_object_length = max_object_length\n + \n===========changed ref 45===========\n + # module: app.backend.prepdocslib.strategy\n + class DocumentAction(Enum):\n + Add = 0\n + Remove = 1\n + RemoveAll = 2\n + \n===========changed ref 46===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n + self.create_embedding_response = create_embedding_response\n + \n===========changed ref 47===========\n + # module: app.backend.prepdocslib.parser\n + class Parser(ABC):\n + \"\"\"\n + Abstract parser that parses content into Page objects\n + \"\"\"\n + \n===========changed ref 48===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n - self.create_embedding_response = create_embedding_response\n - \n===========changed ref 49===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None):\n + self.content = content\n + self.acls = acls or {}\n + \n===========changed ref 50===========\n + # module: app.backend.prepdocslib.embeddings\n + class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n + self.token_provider = token_provider\n + self.endpoint = endpoint\n + \n===========changed ref 51===========\n + # module: app.backend.prepdocslib.embeddings\n + class EmbeddingBatch:\n + def __init__(self, texts: List[str], token_length: int):\n + self.texts = texts\n + self.token_length = token_length\n + \n===========changed ref 52===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + 
class File:\n + def file_extension(self):\n + return os.path.splitext(self.content.name)[1]\n + \n===========changed ref 53===========\n + # module: app.backend.prepdocslib.page\n + class SplitPage:\n + def __init__(self, page_num: int, text: str):\n + self.page_num = page_num\n + self.text = text\n + "}}},{"rowIdx":5887,"cells":{"path":{"kind":"string","value":"tests.conftest/auth_client"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"853cc726ca9d6e0d10b595024fef4f5fd70826b4"},"commit_message":{"kind":"string","value":"Optional upload documents feature (#1395)"},"ground_truth":{"kind":"string","value":" <5>: monkeypatch.setenv(\"USE_USER_UPLOAD\", \"true\")\n monkeypatch.setenv(\"AZURE_USERSTORAGE_ACCOUNT\", \"test-userstorage-account\")\n monkeypatch.setenv(\"AZURE_USERSTORAGE_CONTAINER\", \"test-userstorage-container\")\n monkeypatch.setenv(\"USE_LOCAL_PDF_PARSER\", \"true\")\n monkeypatch.setenv(\"USE_LOCAL_HTML_PARSER\", \"true\")\n monkeypatch.setenv(\"AZURE_DOCUMENTINTELLIGENCE_SERVICE\", \"test-documentintelligence-service\")\n"},"main_code":{"kind":"string","value":" # module: tests.conftest\n @pytest_asyncio.fixture(params=auth_envs)\n async def auth_client(\n monkeypatch,\n mock_openai_chatcompletion,\n mock_openai_embedding,\n mock_confidential_client_success,\n mock_validate_token_success,\n mock_list_groups_success,\n mock_acs_search_filter,\n mock_get_secret,\n request,\n ):\n <0> monkeypatch.setenv(\"AZURE_STORAGE_ACCOUNT\", \"test-storage-account\")\n <1> monkeypatch.setenv(\"AZURE_STORAGE_CONTAINER\", \"test-storage-container\")\n <2> monkeypatch.setenv(\"AZURE_SEARCH_INDEX\", \"test-search-index\")\n <3> monkeypatch.setenv(\"AZURE_SEARCH_SERVICE\", \"test-search-service\")\n <4> monkeypatch.setenv(\"AZURE_OPENAI_CHATGPT_MODEL\", \"gpt-35-turbo\")\n <5> for key, value in request.param.items():\n <6> monkeypatch.setenv(key, value)\n <7> \n <8> with mock.patch(\"app.DefaultAzureCredential\") as mock_default_azure_credential:\n <9> mock_default_azure_credential.return_value = MockAzureCredential()\n<10> quart_app = app.create_app()\n<11> \n<12> async with quart_app.test_app() as test_app:\n<13> quart_app.config.update({\"TESTING\": True})\n<14> mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT])\n<15> mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])\n<16> client = test_app.test_client()\n<17> client.config = quart_app.config\n<18> \n<19> yield client\n<20> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: tests.conftest\n @pytest.fixture\n def mock_acs_search(monkeypatch):\n - monkeypatch.setattr(SearchClient, \"search\", mock_search)\n monkeypatch.setattr(SearchClient, \"search\", mock_search)\n \n async def mock_get_index(*args, **kwargs):\n return MockSearchIndex\n \n monkeypatch.setattr(SearchIndexClient, \"get_index\", mock_get_index)\n \n===========changed ref 1===========\n # module: tests.conftest\n envs = [\n {\n \"OPENAI_HOST\": \"openai\",\n \"OPENAI_API_KEY\": \"secretkey\",\n \"OPENAI_ORGANIZATION\": \"organization\",\n },\n {\n \"OPENAI_HOST\": \"azure\",\n \"AZURE_OPENAI_SERVICE\": \"test-openai-service\",\n \"AZURE_OPENAI_CHATGPT_DEPLOYMENT\": \"test-chatgpt\",\n \"AZURE_OPENAI_EMB_DEPLOYMENT\": \"test-ada\",\n \"USE_GPT4V\": \"true\",\n \"AZURE_OPENAI_GPT4V_MODEL\": \"gpt-4\",\n \"VISION_SECRET_NAME\": \"mysecret\",\n \"VISION_ENDPOINT\": 
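For illustration, a minimal, self-contained sketch of the monkeypatch pattern this fixture relies on, with stand-in classes instead of the real azure.search.documents clients (pytest and pytest-asyncio assumed; not part of the dataset row):

    import pytest


    class FakeSearchClient:
        # Stand-in for the real SDK client whose network call must be patched out
        async def search(self, query):
            raise RuntimeError("network call: should be patched out in tests")


    async def mock_search(self, query):
        # Replacement coroutine; becomes a bound method after setattr on the class
        return ["stub result for " + query]


    @pytest.fixture
    def patched_search(monkeypatch):
        # One setattr is enough; the duplicated call in the row above is the
        # redundancy the commit removes.
        monkeypatch.setattr(FakeSearchClient, "search", mock_search)


    @pytest.mark.asyncio  # requires the pytest-asyncio plugin
    async def test_search_is_patched(patched_search):
        results = await FakeSearchClient().search("hello")
        assert results == ["stub result for hello"]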
\"https://testvision.cognitiveservices.azure.com/\",\n \"AZURE_KEY_VAULT_NAME\": \"mykeyvault\",\n },\n ]\n \n auth_envs = [\n {\n \"OPENAI_HOST\": \"azure\",\n \"AZURE_OPENAI_SERVICE\": \"test-openai-service\",\n \"AZURE_OPENAI_CHATGPT_DEPLOYMENT\": \"test-chatgpt\",\n \"AZURE_OPENAI_EMB_DEPLOYMENT\": \"test-ada\",\n \"AZURE_USE_AUTHENTICATION\": \"true\",\n + \"AZURE_USER_STORAGE_ACCOUNT\": \"test-user-storage-account\",\n + \"AZURE_USER_STORAGE_CONTAINER\": \"test-user-storage-container\",\n \"AZURE_SERVER_APP_ID\": \"SERVER_APP\",\n \"AZURE_SERVER_APP_SECRET\": \"SECRET\",\n \"AZURE_CLIENT_APP_ID\": \"CLIENT_APP\",\n \"AZURE_TENANT_ID\": \"TENANT_ID\",\n },\n ]\n \n===========changed ref 2===========\n + # module: app.backend.prepdocslib.embeddings\n + \n + \n===========changed ref 3===========\n + # module: app.backend.prepdocslib.fileprocessor\n + \n + \n===========changed ref 4===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + \n + \n===========changed ref 5===========\n + # module: app.backend.prepdocslib.filestrategy\n + \n + \n===========changed ref 6===========\n + # module: app.backend.prepdocslib.blobmanager\n + \n + \n===========changed ref 7===========\n + # module: tests.test_upload\n + \n + \n===========changed ref 8===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + \n + \n===========changed ref 9===========\n + # module: app.backend.prepdocslib.textsplitter\n + \n + \n===========changed ref 10===========\n + # module: app.backend.prepdocslib.parser\n + \n + \n===========changed ref 11===========\n + # module: app.backend.prepdocslib.pdfparser\n + \n + \n===========changed ref 12===========\n + # module: app.backend.prepdocslib.htmlparser\n + \n + \n===========changed ref 13===========\n + # module: app.backend.prepdocslib.page\n + \n + \n===========changed ref 14===========\n + # module: app.backend.prepdocslib.textparser\n + \n + \n===========changed ref 15===========\n + # module: app.backend.prepdocslib.jsonparser\n + \n + \n===========changed ref 16===========\n + # module: app.backend.prepdocslib.strategy\n + \n + \n===========changed ref 17===========\n + # module: app.backend.prepdocslib.embeddings\n + class OpenAIEmbeddings(ABC):\n + def create_client(self) -> AsyncOpenAI:\n + raise NotImplementedError\n + \n===========changed ref 18===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __aiter__(self):\n + return self\n + \n===========changed ref 19===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def run(self):\n + raise NotImplementedError\n + \n===========changed ref 20===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def setup(self):\n + raise NotImplementedError\n + \n===========changed ref 21===========\n + # module: app.backend.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 22===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __init__(self, results):\n + self.results = results\n + \n===========changed ref 23===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.results)\n + \n===========changed ref 24===========\n # module: tests.mocks\n class MockAsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.data)\n + \n===========changed ref 25===========\n + # module: 
app.backend.prepdocslib.htmlparser\n + class LocalHTMLParser(Parser):\n + \"\"\"Parses HTML text into Page objects.\"\"\"\n + \n===========changed ref 26===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n + return self.create_embedding_response\n + \n===========changed ref 27===========\n # module: tests.mocks\n class MockBlob:\n + def readinto(self, buffer: BytesIO):\n + buffer.write(b\"test\")\n + \n===========changed ref 28===========\n + # module: app.backend.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 29===========\n + # module: app.backend.prepdocslib.fileprocessor\n + @dataclass(frozen=True)\n + class FileProcessor:\n + parser: Parser\n + splitter: TextSplitter\n + \n===========changed ref 30===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 31===========\n + # module: app.backend.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 32===========\n + # module: app.backend.prepdocs\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 33===========\n + # module: app.backend.prepdocslib.blobmanager\n + class BlobManager:\n + @classmethod\n + def blob_name_from_file_name(cls, filename) -> str:\n + return os.path.basename(filename)\n + \n===========changed ref 34===========\n + # module: app.backend.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 35===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n + self.path_pattern = path_pattern\n + \n===========changed ref 36===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 37===========\n + # module: app.backend.prepdocslib.textsplitter\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 38===========\n + # module: app.backend.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 39===========\n + # module: app.backend.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 40===========\n + # module: app.backend.prepdocslib.textparser\n + class TextParser(Parser):\n + \"\"\"Parses simple text into a Page object.\"\"\"\n + \n===========changed ref 41===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n - return self.create_embedding_response\n - \n===========changed ref 42===========\n # module: tests.mocks\n + class MockClient:\n + def __init__(self, embeddings_client):\n + self.embeddings = embeddings_client\n + \n===========changed ref 43===========\n # module: tests.test_searchmanager\n - class MockClient:\n - def __init__(self, embeddings_client):\n - self.embeddings = embeddings_client\n - \n===========changed ref 44===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def close(self):\n + if self.content:\n + self.content.close()\n + \n===========changed ref 45===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def filename(self):\n + return os.path.basename(self.content.name)\n + 
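A sketch of the params/request.param fixture pattern auth_client uses: each dict in params produces one run of every dependent test, with the env vars applied through monkeypatch. The env dicts here are invented placeholders, not the repo's values:

    import os

    import pytest

    fake_auth_envs = [
        {"AZURE_USE_AUTHENTICATION": "true", "AZURE_TENANT_ID": "TENANT_ID"},
        {"AZURE_USE_AUTHENTICATION": "true", "USE_USER_UPLOAD": "true"},
    ]


    @pytest.fixture(params=fake_auth_envs)
    def configured_env(monkeypatch, request):
        # request.param is the dict for the current parametrization
        for key, value in request.param.items():
            monkeypatch.setenv(key, value)
        return request.param


    def test_env_applied(configured_env):
        # monkeypatch restores the environment automatically after each test
        for key, value in configured_env.items():
            assert os.environ[key] == value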
"}}},{"rowIdx":5888,"cells":{"path":{"kind":"string","value":"scripts.auth_init/create_client_app_payload"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"853cc726ca9d6e0d10b595024fef4f5fd70826b4"},"commit_message":{"kind":"string","value":"Optional upload documents feature (#1395)"},"ground_truth":{"kind":"string","value":" <7>: \"spa\": {\"redirectUris\": [\"http://localhost:50505/redirect\", \"http://localhost:5173/redirect\"]},\n \"spa\": {\"redirectUris\": [\"http://localhost:50505/redirect\"]},\n"},"main_code":{"kind":"string","value":" # module: scripts.auth_init\n def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int):\n <0> return {\n <1> \"displayName\": f\"Azure Search OpenAI Chat Client App {identifier}\",\n <2> \"signInAudience\": \"AzureADMyOrg\",\n <3> \"web\": {\n <4> \"redirectUris\": [\"http://localhost:50505/.auth/login/aad/callback\"],\n <5> \"implicitGrantSettings\": {\"enableIdTokenIssuance\": True},\n <6> },\n <7> \"spa\": {\"redirectUris\": [\"http://localhost:50505/redirect\"]},\n <8> \"requiredResourceAccess\": [\n <9> # access_as_user from server app\n<10> {\n<11> \"resourceAppId\": server_app_id,\n<12> \"resourceAccess\": [\n<13> {\n<14> \"id\": server_app_permission_setup_payload[\"api\"][\"oauth2PermissionScopes\"][0][\"id\"],\n<15> \"type\": \"Scope\",\n<16> }\n<17> ],\n<18> },\n<19> # Graph User.Read\n<20> {\n<21> \"resourceAppId\": \"00000003-0000-0000-c000-000000000000\",\n<22> \"resourceAccess\": [{\"id\": \"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", \"type\": \"Scope\"}],\n<23> },\n<24> ],\n<25> }\n<26> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: typing\n Dict = _alias(dict, 2, inst=False, name='Dict')\n \n \n===========changed ref 0===========\n + # module: app.backend.prepdocslib.searchmanager\n + \n + \n===========changed ref 1===========\n + # module: app.backend.prepdocslib.embeddings\n + \n + \n===========changed ref 2===========\n + # module: app.backend.prepdocslib.fileprocessor\n + \n + \n===========changed ref 3===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + \n + \n===========changed ref 4===========\n + # module: app.backend.prepdocslib.filestrategy\n + \n + \n===========changed ref 5===========\n + # module: app.backend.prepdocslib.blobmanager\n + \n + \n===========changed ref 6===========\n + # module: tests.test_upload\n + \n + \n===========changed ref 7===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + \n + \n===========changed ref 8===========\n + # module: app.backend.prepdocslib.textsplitter\n + \n + \n===========changed ref 9===========\n + # module: app.backend.prepdocslib.parser\n + \n + \n===========changed ref 10===========\n + # module: app.backend.prepdocslib.pdfparser\n + \n + \n===========changed ref 11===========\n + # module: app.backend.prepdocslib.htmlparser\n + \n + \n===========changed ref 12===========\n + # module: app.backend.prepdocslib.page\n + \n + \n===========changed ref 13===========\n + # module: app.backend.prepdocslib.textparser\n + \n + \n===========changed ref 14===========\n + # module: app.backend.prepdocslib.jsonparser\n + \n + \n===========changed ref 15===========\n + # module: app.backend.prepdocslib.strategy\n + \n + \n===========changed ref 16===========\n + # module: app.backend.prepdocslib.embeddings\n + class 
OpenAIEmbeddings(ABC):\n + def create_client(self) -> AsyncOpenAI:\n + raise NotImplementedError\n + \n===========changed ref 17===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __aiter__(self):\n + return self\n + \n===========changed ref 18===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def run(self):\n + raise NotImplementedError\n + \n===========changed ref 19===========\n + # module: app.backend.prepdocslib.strategy\n + class Strategy(ABC):\n + def setup(self):\n + raise NotImplementedError\n + \n===========changed ref 20===========\n + # module: app.backend.prepdocslib.embeddings\n + class ExtraArgs(TypedDict, total=False):\n + dimensions: int\n + \n===========changed ref 21===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def __init__(self, results):\n + self.results = results\n + \n===========changed ref 22===========\n # module: tests.test_searchmanager\n + class AsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.results)\n + \n===========changed ref 23===========\n # module: tests.mocks\n class MockAsyncSearchResultsIterator:\n + def get_count(self):\n + return len(self.data)\n + \n===========changed ref 24===========\n + # module: app.backend.prepdocslib.htmlparser\n + class LocalHTMLParser(Parser):\n + \"\"\"Parses HTML text into Page objects.\"\"\"\n + \n===========changed ref 25===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n + return self.create_embedding_response\n + \n===========changed ref 26===========\n # module: tests.mocks\n class MockBlob:\n + def readinto(self, buffer: BytesIO):\n + buffer.write(b\"test\")\n + \n===========changed ref 27===========\n + # module: app.backend.prepdocslib.searchmanager\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 28===========\n + # module: app.backend.prepdocslib.embeddings\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 29===========\n + # module: app.backend.prepdocslib.fileprocessor\n + @dataclass(frozen=True)\n + class FileProcessor:\n + parser: Parser\n + splitter: TextSplitter\n + \n===========changed ref 30===========\n + # module: app.backend.prepdocslib.integratedvectorizerstrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 31===========\n + # module: app.backend.prepdocslib.filestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 32===========\n + # module: app.backend.prepdocs\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 33===========\n + # module: app.backend.prepdocslib.blobmanager\n + class BlobManager:\n + @classmethod\n + def blob_name_from_file_name(cls, filename) -> str:\n + return os.path.basename(filename)\n + \n===========changed ref 34===========\n + # module: app.backend.prepdocslib.blobmanager\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 35===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class LocalListFileStrategy(ListFileStrategy):\n + def __init__(self, path_pattern: str):\n + self.path_pattern = path_pattern\n + \n===========changed ref 36===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 37===========\n + # module: app.backend.prepdocslib.textsplitter\n + logger = 
logging.getLogger(\"ingester\")\n + \n===========changed ref 38===========\n + # module: app.backend.prepdocslib.pdfparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 39===========\n + # module: app.backend.prepdocslib.htmlparser\n + logger = logging.getLogger(\"ingester\")\n + \n===========changed ref 40===========\n + # module: app.backend.prepdocslib.textparser\n + class TextParser(Parser):\n + \"\"\"Parses simple text into a Page object.\"\"\"\n + \n===========changed ref 41===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse:\n - return self.create_embedding_response\n - \n===========changed ref 42===========\n # module: tests.mocks\n + class MockClient:\n + def __init__(self, embeddings_client):\n + self.embeddings = embeddings_client\n + \n===========changed ref 43===========\n # module: tests.test_searchmanager\n - class MockClient:\n - def __init__(self, embeddings_client):\n - self.embeddings = embeddings_client\n - \n===========changed ref 44===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def close(self):\n + if self.content:\n + self.content.close()\n + \n===========changed ref 45===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def filename(self):\n + return os.path.basename(self.content.name)\n + \n===========changed ref 46===========\n + # module: app.backend.prepdocslib.textsplitter\n + class SimpleTextSplitter(TextSplitter):\n + def __init__(self, max_object_length: int = 1000):\n + self.max_object_length = max_object_length\n + \n===========changed ref 47===========\n + # module: app.backend.prepdocslib.strategy\n + class DocumentAction(Enum):\n + Add = 0\n + Remove = 1\n + RemoveAll = 2\n + \n===========changed ref 48===========\n # module: tests.mocks\n + class MockEmbeddingsClient:\n + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n + self.create_embedding_response = create_embedding_response\n + \n===========changed ref 49===========\n + # module: app.backend.prepdocslib.parser\n + class Parser(ABC):\n + \"\"\"\n + Abstract parser that parses content into Page objects\n + \"\"\"\n + \n===========changed ref 50===========\n # module: tests.test_searchmanager\n - class MockEmbeddingsClient:\n - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse):\n - self.create_embedding_response = create_embedding_response\n - \n===========changed ref 51===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None):\n + self.content = content\n + self.acls = acls or {}\n + \n===========changed ref 52===========\n + # module: app.backend.prepdocslib.embeddings\n + class ImageEmbeddings:\n + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]):\n + self.token_provider = token_provider\n + self.endpoint = endpoint\n + \n===========changed ref 53===========\n + # module: app.backend.prepdocslib.embeddings\n + class EmbeddingBatch:\n + def __init__(self, texts: List[str], token_length: int):\n + self.texts = texts\n + self.token_length = token_length\n + \n===========changed ref 54===========\n + # module: app.backend.prepdocslib.listfilestrategy\n + class File:\n + def file_extension(self):\n + return os.path.splitext(self.content.name)[1]\n + 
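The invariant this payload encodes, written out as a runnable check: the client registration's requiredResourceAccess must reference the scope id exposed by the server registration. The ids below are random stand-ins, not real app registrations:

    import uuid

    server_app_id = str(uuid.uuid4())
    server_permission_setup = {
        "api": {"oauth2PermissionScopes": [{"id": str(uuid.uuid4()), "value": "access_as_user"}]}
    }

    # Fragment of the client payload that links back to the server app's scope
    client_payload_fragment = {
        "requiredResourceAccess": [
            {
                "resourceAppId": server_app_id,
                "resourceAccess": [
                    {
                        "id": server_permission_setup["api"]["oauth2PermissionScopes"][0]["id"],
                        "type": "Scope",
                    }
                ],
            }
        ]
    }

    # The scope id in the client payload must match the one the server exposes
    assert (
        client_payload_fragment["requiredResourceAccess"][0]["resourceAccess"][0]["id"]
        == server_permission_setup["api"]["oauth2PermissionScopes"][0]["id"]
    )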
"}}},{"rowIdx":5889,"cells":{"path":{"kind":"string","value":"scripts.auth_update/main"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication scripts (#1510)"},"ground_truth":{"kind":"string","value":" <4>: credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.environ[\"AZURE_TENANT_ID\"]))\n credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.getenv(\"AZURE_TENANT_ID\")))\n <5>: \n scopes = [\"https://graph.microsoft.com/.default\"]\n graph_client = GraphServiceClient(credentials=credential, scopes=scopes)\n auth_headers = await get_auth_headers(credential)\n<10>: client_object_id = await get_application(graph_client, client_app_id)\n client_object_id = await get_application(auth_headers, client_app_id)\n<14>: app = Application(\n public_client=PublicClientApplication(redirect_uris=[]),\n spa=SpaApplication(\n payload = {\n<15>: \"publicClient\": {\"redirectUris\": []},\n<16>: \"spa\": {\n<17>: redirect_uris=[\n \"redirectUris\": [\n<22>: ),\n },\n<23>: web=WebApplication(\n \"web\": {\n<24>: redirect_uris=[\n \"redirectUris\": [\n<27>: ),\n },\n<28>: )\n }\n<29>: await graph_client.applications.by_application_id(client_object_id).patch(app)\n await update_application(auth_headers, client_object_id, payload)\n"},"main_code":{"kind":"string","value":" # module: scripts.auth_update\n def main():\n <0> if not test_authentication_enabled():\n <1> print(\"Not updating authentication.\")\n <2> exit(0)\n <3> \n <4> credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.getenv(\"AZURE_TENANT_ID\")))\n <5> auth_headers = await get_auth_headers(credential)\n <6> \n <7> uri = os.getenv(\"BACKEND_URI\")\n <8> client_app_id = os.getenv(\"AZURE_CLIENT_APP_ID\", None)\n <9> if client_app_id:\n<10> client_object_id = await get_application(auth_headers, client_app_id)\n<11> if client_object_id:\n<12> print(f\"Updating redirect URIs for client app ID {client_app_id}...\")\n<13> # Redirect URIs need to be relative to the deployed application\n<14> payload = {\n<15> \"publicClient\": {\"redirectUris\": []},\n<16> \"spa\": {\n<17> \"redirectUris\": [\n<18> \"http://localhost:50505/redirect\",\n<19> \"http://localhost:5173/redirect\",\n<20> f\"{uri}/redirect\",\n<21> ]\n<22> },\n<23> \"web\": {\n<24> \"redirectUris\": [\n<25> f\"{uri}/.auth/login/aad/callback\",\n<26> ]\n<27> },\n<28> }\n<29> await update_application(auth_headers, client_object_id, payload)\n<30> print(f\"Application update for client app id {client_app_id} complete.\")\n<31> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: auth_common\n get_application(graph_client: GraphServiceClient, client_id: str) -> Optional[str]\n \n test_authentication_enabled()\n \n at: os\n environ = _createenviron()\n \n getenv(key: str, default: _T) -> Union[str, _T]\n getenv(key: str) -> Optional[str]\n \n "}}},{"rowIdx":5890,"cells":{"path":{"kind":"string","value":"scripts.auth_init/create_application"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication 
scripts (#1510)"},"ground_truth":{"kind":"string","value":" <0>: async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1>: async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n <2>: if response.status != 201:\n <3>: raise Exception(await response.json())\n <4>: response_json = await response.json()\n <5>: object_id = response_json[\"id\"]\n <6>: client_id = response_json[\"appId\"]\n <7>: app = await graph_client.applications.post(request_app)\n object_id = app.id\n client_id = app.app_id\n <8>: async with session.post(\n <9>: \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n<10>: json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n<11>: ) as response:\n<12>: if response.status != 201:\n<13>: raise Exception(await response.json())\n<14>: \n<15>: # Create a service principal\n request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name)\n await graph_client.service_principals.post(request_principal)\n"},"main_code":{"kind":"string","value":" # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1> async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n <2> if response.status != 201:\n <3> raise Exception(await response.json())\n <4> response_json = await response.json()\n <5> object_id = response_json[\"id\"]\n <6> client_id = response_json[\"appId\"]\n <7> \n <8> async with session.post(\n <9> \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n<10> json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n<11> ) as response:\n<12> if response.status != 201:\n<13> raise Exception(await response.json())\n<14> \n<15> return object_id, client_id\n<16> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.auth_update\n def main():\n if not test_authentication_enabled():\n print(\"Not updating authentication.\")\n exit(0)\n \n + credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.environ[\"AZURE_TENANT_ID\"]))\n - credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.getenv(\"AZURE_TENANT_ID\")))\n + \n + scopes = [\"https://graph.microsoft.com/.default\"]\n + graph_client = GraphServiceClient(credentials=credential, scopes=scopes)\n - auth_headers = await get_auth_headers(credential)\n \n uri = os.getenv(\"BACKEND_URI\")\n client_app_id = os.getenv(\"AZURE_CLIENT_APP_ID\", None)\n if client_app_id:\n + client_object_id = await get_application(graph_client, client_app_id)\n - client_object_id = await get_application(auth_headers, client_app_id)\n if client_object_id:\n print(f\"Updating redirect URIs for client app ID {client_app_id}...\")\n # Redirect URIs need to be relative to the deployed application\n + app = Application(\n + public_client=PublicClientApplication(redirect_uris=[]),\n + spa=SpaApplication(\n - payload = {\n - \"publicClient\": {\"redirectUris\": []},\n - \"spa\": {\n + redirect_uris=[\n - \"redirectUris\": [\n \"http://localhost:50505/redirect\",\n \"http://localhost:5173/redirect\",\n f\"{uri}/redirect\",\n 
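A condensed sketch of the Graph SDK pattern this diff migrates to, in place of raw aiohttp calls against graph.microsoft.com. The import paths follow the msgraph-sdk package layout and should be verified against the installed version; the object id and URI are placeholders:

    import asyncio
    import os

    from azure.identity.aio import AzureDeveloperCliCredential
    from msgraph import GraphServiceClient
    from msgraph.generated.models.application import Application
    from msgraph.generated.models.spa_application import SpaApplication
    from msgraph.generated.models.web_application import WebApplication


    async def update_redirect_uris(object_id: str, uri: str) -> None:
        credential = AzureDeveloperCliCredential(tenant_id=os.environ["AZURE_TENANT_ID"])
        async with credential:
            graph_client = GraphServiceClient(
                credentials=credential, scopes=["https://graph.microsoft.com/.default"]
            )
            # PATCH /applications/{object-id} sends only the fields being changed
            app = Application(
                spa=SpaApplication(redirect_uris=[f"{uri}/redirect"]),
                web=WebApplication(redirect_uris=[f"{uri}/.auth/login/aad/callback"]),
            )
            await graph_client.applications.by_application_id(object_id).patch(app)


    if __name__ == "__main__":
        # Placeholder arguments; substitute a real object id and deployed URI
        asyncio.run(update_redirect_uris("00000000-0000-0000-0000-000000000000", "https://example.azurewebsites.net"))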
]\n + ),\n - },\n + web=WebApplication(\n - \"web\": {\n + redirect_uris=[\n - \"redirectUris\": [\n f\"{uri}/.auth/login/aad/callback\",\n ]\n + ),\n - },\n + )\n - }\n + await\n===========changed ref 1===========\n # module: scripts.auth_update\n def main():\n # offset: 1\n /aad/callback\",\n ]\n + ),\n - },\n + )\n - }\n + await graph_client.applications.by_application_id(client_object_id).patch(app)\n - await update_application(auth_headers, client_object_id, payload)\n print(f\"Application update for client app id {client_app_id} complete.\")\n "}}},{"rowIdx":5891,"cells":{"path":{"kind":"string","value":"scripts.auth_init/add_client_secret"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication scripts (#1510)"},"ground_truth":{"kind":"string","value":" <0>: async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1>: async with session.post(\n <2>: f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n <3>: json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n <4>: ) as response:\n <5>: response_json = await response.json()\n <6>: if response.status == 200:\n <7>: return response_json[\"secretText\"]\n <8>: request_password = AddPasswordPostRequestBody(\n password_credential=PasswordCredential(display_name=\"WebAppSecret\"),\n )\n result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)\n return result.secret_text\n <9>: raise Exception(response_json)\n<10>: \n"},"main_code":{"kind":"string","value":" # module: scripts.auth_init\n + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:\n - def add_client_secret(auth_headers: Dict[str, str], object_id: str):\n <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1> async with session.post(\n <2> f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n <3> json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n <4> ) as response:\n <5> response_json = await response.json()\n <6> if response.status == 200:\n <7> return response_json[\"secretText\"]\n <8> \n <9> raise Exception(response_json)\n<10> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - response_json = await response.json()\n - object_id = response_json[\"id\"]\n - client_id = response_json[\"appId\"]\n + app = await graph_client.applications.post(request_app)\n + object_id = app.id\n + client_id = app.app_id\n \n - async with session.post(\n - \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n - json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n - ) as response:\n - if response.status != 201:\n 
- raise Exception(await response.json())\n - \n + # Create a service principal\n + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name)\n + await graph_client.service_principals.post(request_principal)\n return object_id, client_id\n \n===========changed ref 1===========\n # module: scripts.auth_update\n def main():\n if not test_authentication_enabled():\n print(\"Not updating authentication.\")\n exit(0)\n \n + credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.environ[\"AZURE_TENANT_ID\"]))\n - credential = AzureDeveloperCliCredential(tenant_id=os.getenv(\"AZURE_AUTH_TENANT_ID\", os.getenv(\"AZURE_TENANT_ID\")))\n + \n + scopes = [\"https://graph.microsoft.com/.default\"]\n + graph_client = GraphServiceClient(credentials=credential, scopes=scopes)\n - auth_headers = await get_auth_headers(credential)\n \n uri = os.getenv(\"BACKEND_URI\")\n client_app_id = os.getenv(\"AZURE_CLIENT_APP_ID\", None)\n if client_app_id:\n + client_object_id = await get_application(graph_client, client_app_id)\n - client_object_id = await get_application(auth_headers, client_app_id)\n if client_object_id:\n print(f\"Updating redirect URIs for client app ID {client_app_id}...\")\n # Redirect URIs need to be relative to the deployed application\n + app = Application(\n + public_client=PublicClientApplication(redirect_uris=[]),\n + spa=SpaApplication(\n - payload = {\n - \"publicClient\": {\"redirectUris\": []},\n - \"spa\": {\n + redirect_uris=[\n - \"redirectUris\": [\n \"http://localhost:50505/redirect\",\n \"http://localhost:5173/redirect\",\n f\"{uri}/redirect\",\n ]\n + ),\n - },\n + web=WebApplication(\n - \"web\": {\n + redirect_uris=[\n - \"redirectUris\": [\n f\"{uri}/.auth/login/aad/callback\",\n ]\n + ),\n - },\n + )\n - }\n + await\n===========changed ref 2===========\n # module: scripts.auth_update\n def main():\n # offset: 1\n /aad/callback\",\n ]\n + ),\n - },\n + )\n - }\n + await graph_client.applications.by_application_id(client_object_id).patch(app)\n - await update_application(auth_headers, client_object_id, payload)\n print(f\"Application update for client app id {client_app_id} complete.\")\n "}}},{"rowIdx":5892,"cells":{"path":{"kind":"string","value":"scripts.auth_init/create_or_update_application_with_secret"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication scripts (#1510)"},"ground_truth":{"kind":"string","value":" <5>: object_id = await get_application(graph_client, app_id)\n object_id = await get_application(auth_headers, app_id)\n <9>: await graph_client.applications.by_application_id(object_id).patch(request_app)\n await update_application(auth_headers, object_id, app_payload)\n<12>: object_id, app_id = await create_application(graph_client, request_app)\n object_id, app_id = await create_application(auth_headers, app_payload)\n<18>: client_secret = await add_client_secret(graph_client, object_id)\n client_secret = await add_client_secret(auth_headers, object_id)\n"},"main_code":{"kind":"string","value":" # module: scripts.auth_init\n def create_or_update_application_with_secret(\n + graph_client: GraphServiceClient, app_id_env_var: str, app_secret_env_var: str, request_app: Application\n - auth_headers: Dict[str, str], app_id_env_var: str, app_secret_env_var: str, 
app_payload: Dict[str, Any]\n ) -> Tuple[str, str, bool]:\n <0> app_id = os.getenv(app_id_env_var, \"no-id\")\n <1> created_app = False\n <2> object_id = None\n <3> if app_id != \"no-id\":\n <4> print(f\"Checking if application {app_id} exists\")\n <5> object_id = await get_application(auth_headers, app_id)\n <6> \n <7> if object_id:\n <8> print(\"Application already exists, not creating new one\")\n <9> await update_application(auth_headers, object_id, app_payload)\n<10> else:\n<11> print(\"Creating application registration\")\n<12> object_id, app_id = await create_application(auth_headers, app_payload)\n<13> update_azd_env(app_id_env_var, app_id)\n<14> created_app = True\n<15> \n<16> if object_id and os.getenv(app_secret_env_var, \"no-secret\") == \"no-secret\":\n<17> print(f\"Adding client secret to {app_id}\")\n<18> client_secret = await add_client_secret(auth_headers, object_id)\n<19> update_azd_env(app_secret_env_var, client_secret)\n<20> \n<21> return (object_id, app_id, created_app)\n<22> \n "},"context":{"kind":"string","value":"===========changed ref 0===========\n # module: scripts.auth_init\n + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:\n - def add_client_secret(auth_headers: Dict[str, str], object_id: str):\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\n - f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n - json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n - ) as response:\n - response_json = await response.json()\n - if response.status == 200:\n - return response_json[\"secretText\"]\n + request_password = AddPasswordPostRequestBody(\n + password_credential=PasswordCredential(display_name=\"WebAppSecret\"),\n + )\n + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)\n + return result.secret_text\n \n - raise Exception(response_json)\n - \n===========changed ref 1===========\n # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - response_json = await response.json()\n - object_id = response_json[\"id\"]\n - client_id = response_json[\"appId\"]\n + app = await graph_client.applications.post(request_app)\n + object_id = app.id\n + client_id = app.app_id\n \n - async with session.post(\n - \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n - json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n - ) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - \n + # Create a service principal\n + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name)\n + await graph_client.service_principals.post(request_principal)\n return object_id, client_id\n \n===========changed ref 2===========\n # module: scripts.auth_update\n def main():\n if not test_authentication_enabled():\n print(\"Not updating authentication.\")\n exit(0)\n \n + credential = 
=========== row 5892: scripts.auth_init/create_or_update_application_with_secret (Modified) ===========
project / commit: as above (a2df481, "Port to the Graph SDK for authentication scripts (#1510)")

ground truth (per masked slot):
    <5>:  + object_id = await get_application(graph_client, app_id)
          - object_id = await get_application(auth_headers, app_id)
    <9>:  + await graph_client.applications.by_application_id(object_id).patch(request_app)
          - await update_application(auth_headers, object_id, app_payload)
    <12>: + object_id, app_id = await create_application(graph_client, request_app)
          - object_id, app_id = await create_application(auth_headers, app_payload)
    <18>: + client_secret = await add_client_secret(graph_client, object_id)
          - client_secret = await add_client_secret(auth_headers, object_id)

main code (pre-edit), with the signature change recorded in the row:
    # module: scripts.auth_init
    def create_or_update_application_with_secret(
    +     graph_client: GraphServiceClient, app_id_env_var: str, app_secret_env_var: str, request_app: Application
    -     auth_headers: Dict[str, str], app_id_env_var: str, app_secret_env_var: str, app_payload: Dict[str, Any]
    ) -> Tuple[str, str, bool]:
    <0>     app_id = os.getenv(app_id_env_var, "no-id")
    <1>     created_app = False
    <2>     object_id = None
    <3>     if app_id != "no-id":
    <4>         print(f"Checking if application {app_id} exists")
    <5>         object_id = await get_application(auth_headers, app_id)
    <6>
    <7>     if object_id:
    <8>         print("Application already exists, not creating new one")
    <9>         await update_application(auth_headers, object_id, app_payload)
    <10>    else:
    <11>        print("Creating application registration")
    <12>        object_id, app_id = await create_application(auth_headers, app_payload)
    <13>        update_azd_env(app_id_env_var, app_id)
    <14>        created_app = True
    <15>
    <16>    if object_id and os.getenv(app_secret_env_var, "no-secret") == "no-secret":
    <17>        print(f"Adding client secret to {app_id}")
    <18>        client_secret = await add_client_secret(auth_headers, object_id)
    <19>        update_azd_env(app_secret_env_var, client_secret)
    <20>
    <21>    return (object_id, app_id, created_app)

context: the add_client_secret, create_application and auth_update/main diffs already shown above (verbatim duplicates trimmed).
id=server_app.api.oauth2_permission_scopes[0].id,\n + type=\"Scope\",\n + )\n + ],\n + ),\n + # Graph User.Read\n + RequiredResourceAccess(\n + resource_app_id=\"00000003-0000-0000-c000-000000000000\",\n + resource_access=[\n + ResourceAccess(id=\"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", type=\"Scope\"),\n + ],\n + ),\n + ],\n + )\n + \n===========changed ref 5===========\n # module: scripts.auth_init\n + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:\n - def add_client_secret(auth_headers: Dict[str, str], object_id: str):\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\n - f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n - json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n - ) as response:\n - response_json = await response.json()\n - if response.status == 200:\n - return response_json[\"secretText\"]\n + request_password = AddPasswordPostRequestBody(\n + password_credential=PasswordCredential(display_name=\"WebAppSecret\"),\n + )\n + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)\n + return result.secret_text\n \n - raise Exception(response_json)\n - \n===========changed ref 6===========\n # module: scripts.auth_init\n - def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int):\n - return {\n - \"displayName\": f\"Azure Search OpenAI Chat Client App {identifier}\",\n - \"signInAudience\": \"AzureADMyOrg\",\n - \"web\": {\n - \"redirectUris\": [\"http://localhost:50505/.auth/login/aad/callback\"],\n - \"implicitGrantSettings\": {\"enableIdTokenIssuance\": True},\n - },\n - \"spa\": {\"redirectUris\": [\"http://localhost:50505/redirect\", \"http://localhost:5173/redirect\"]},\n - \"requiredResourceAccess\": [\n - # access_as_user from server app\n - {\n - \"resourceAppId\": server_app_id,\n - \"resourceAccess\": [\n - {\n - \"id\": server_app_permission_setup_payload[\"api\"][\"oauth2PermissionScopes\"][0][\"id\"],\n - \"type\": \"Scope\",\n - }\n - ],\n - },\n - # Graph User.Read\n - {\n - \"resourceAppId\": \"00000003-0000-0000-c000-000000000000\",\n - \"resourceAccess\": [{\"id\": \"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", \"type\": \"Scope\"}],\n - },\n - ],\n - }\n - \n===========changed ref 7===========\n # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - response_json = await response.json()\n - object_id = response_json[\"id\"]\n - client_id = response_json[\"appId\"]\n + app = await graph_client.applications.post(request_app)\n + object_id = app.id\n + client_id = app.app_id\n \n - async with session.post(\n - \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n - json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n - ) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - \n + # Create a service principal\n + request_principal = 
ServicePrincipal(app_id=client_id, display_name=app.display_name)\n + await graph_client.service_principals.post(request_principal)\n return object_id, client_id\n "}}},{"rowIdx":5894,"cells":{"path":{"kind":"string","value":"app.backend.core.authentication/AuthenticationHelper.validate_access_token"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication scripts (#1510)"},"ground_truth":{"kind":"string","value":""},"main_code":{"kind":"string","value":" # module: app.backend.core.authentication\n class AuthenticationHelper:\n def validate_access_token(self, token: str):\n <0> \"\"\"\n <1> Validate an access token is issued by Entra\n <2> \"\"\"\n <3> jwks = None\n <4> async for attempt in AsyncRetrying(\n <5> retry=retry_if_exception_type(AuthError),\n <6> wait=wait_random_exponential(min=15, max=60),\n <7> stop=stop_after_attempt(5),\n <8> ):\n <9> with attempt:\n<10> async with aiohttp.ClientSession() as session:\n<11> async with session.get(url=self.key_url) as resp:\n<12> resp_status = resp.status\n<13> if resp_status in [500, 502, 503, 504]:\n<14> raise AuthError(\n<15> error=f\"Failed to get keys info: {await resp.text()}\", status_code=resp_status\n<16> )\n<17> jwks = await resp.json()\n<18> \n<19> if not jwks or \"keys\" not in jwks:\n<20> raise AuthError({\"code\": \"invalid_keys\", \"description\": \"Unable to get keys to validate auth token.\"}, 401)\n<21> \n<22> rsa_key = None\n<23> issuer = None\n<24> audience = None\n<25> try:\n<26> unverified_header = jwt.get_unverified_header(token)\n<27> unverified_claims = jwt.get_unverified_claims(token)\n<28> issuer = unverified_claims.get(\"iss\")\n<29> audience = unverified_claims.get(\"aud\")\n<30> for key in jwks[\"keys\"]:\n<31> if key[\"kid\"] == unverified_header[\"kid\"]:\n<32> rsa_key = {\"kty\": key[\"kty\"], \"kid\": key[\"kid\"], \"use\": key[\"use\"], \"n\": key[\"n\"], \"e\": key[\"e\"]}\n<33> break\n<34> except Exception as exc:\n<35> raise AuthError(\n<36> {\"code\": \"invalid_header\", \"description\": \"Unable to parse authorization token.\"}, 401\n<37> ) from exc\n "},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: app.backend.core.authentication\n class AuthenticationHelper:\n def validate_access_token(self, token: str):\n # offset: 1\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Unable to find appropriate key\"}, 401)\n \n if issuer not in self.valid_issuers:\n raise AuthError(\n {\"code\": \"invalid_header\", \"description\": f\"Issuer {issuer} not in {','.join(self.valid_issuers)}\"}, 401\n )\n \n if audience not in self.valid_audiences:\n raise AuthError(\n {\n \"code\": \"invalid_header\",\n \"description\": f\"Audience {audience} not in {','.join(self.valid_audiences)}\",\n },\n 401,\n )\n \n try:\n jwt.decode(token, rsa_key, algorithms=[\"RS256\"], audience=audience, issuer=issuer)\n except jwt.ExpiredSignatureError as jwt_expired_exc:\n raise AuthError({\"code\": \"token_expired\", \"description\": \"token is expired\"}, 401) from jwt_expired_exc\n except jwt.JWTClaimsError as jwt_claims_exc:\n raise AuthError(\n {\"code\": \"invalid_claims\", \"description\": \"incorrect claims,\" \"please check the audience and issuer\"},\n 401,\n ) from jwt_claims_exc\n except Exception as exc:\n raise AuthError(\n {\"code\": 
\"invalid_header\", \"description\": \"Unable to parse authorization token.\"}, 401\n ) from exc\n \n \n===========changed ref 0===========\n # module: scripts.auth_init\n + def server_app_known_client_application(client_app_id: str) -> Application:\n + return Application(\n + api=ApiApplication(\n + known_client_applications=[client_app_id],\n + )\n + )\n + \n===========changed ref 1===========\n # module: scripts.auth_init\n - def create_server_app_known_client_application_payload(client_app_id: str):\n - return {\n - \"api\": {\n - \"knownClientApplications\": [client_app_id],\n - }\n - }\n - \n===========changed ref 2===========\n # module: scripts.auth_init\n + def server_app_initial(identifier: int) -> Application:\n + return Application(\n + display_name=f\"Azure Search OpenAI Chat Server App {identifier}\",\n + sign_in_audience=\"AzureADMyOrg\",\n + )\n + \n===========changed ref 3===========\n # module: scripts.auth_init\n - def create_server_app_initial_payload(identifier: int):\n - return {\n - \"displayName\": f\"Azure Search OpenAI Chat Server App {identifier}\",\n - \"signInAudience\": \"AzureADMyOrg\",\n - }\n - \n===========changed ref 4===========\n # module: scripts.auth_init\n + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:\n - def add_client_secret(auth_headers: Dict[str, str], object_id: str):\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\n - f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n - json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n - ) as response:\n - response_json = await response.json()\n - if response.status == 200:\n - return response_json[\"secretText\"]\n + request_password = AddPasswordPostRequestBody(\n + password_credential=PasswordCredential(display_name=\"WebAppSecret\"),\n + )\n + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)\n + return result.secret_text\n \n - raise Exception(response_json)\n - \n===========changed ref 5===========\n # module: scripts.auth_init\n + def client_app(server_app_id: str, server_app: Application, identifier: int) -> Application:\n + return Application(\n + display_name=f\"Azure Search OpenAI Chat Client App {identifier}\",\n + sign_in_audience=\"AzureADMyOrg\",\n + web=WebApplication(\n + redirect_uris=[\"http://localhost:50505/.auth/login/aad/callback\"],\n + implicit_grant_settings=ImplicitGrantSettings(enable_id_token_issuance=True),\n + ),\n + spa=SpaApplication(redirect_uris=[\"http://localhost:50505/redirect\", \"http://localhost:5173/redirect\"]),\n + required_resource_access=[\n + RequiredResourceAccess(\n + resource_app_id=server_app_id,\n + resource_access=[\n + ResourceAccess(\n + id=server_app.api.oauth2_permission_scopes[0].id,\n + type=\"Scope\",\n + )\n + ],\n + ),\n + # Graph User.Read\n + RequiredResourceAccess(\n + resource_app_id=\"00000003-0000-0000-c000-000000000000\",\n + resource_access=[\n + ResourceAccess(id=\"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", type=\"Scope\"),\n + ],\n + ),\n + ],\n + )\n + \n===========changed ref 6===========\n # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n - async with aiohttp.ClientSession(headers=auth_headers, 
timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - response_json = await response.json()\n - object_id = response_json[\"id\"]\n - client_id = response_json[\"appId\"]\n + app = await graph_client.applications.post(request_app)\n + object_id = app.id\n + client_id = app.app_id\n \n - async with session.post(\n - \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n - json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n - ) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - \n + # Create a service principal\n + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name)\n + await graph_client.service_principals.post(request_principal)\n return object_id, client_id\n "}}},{"rowIdx":5895,"cells":{"path":{"kind":"string","value":"scripts.auth_common/get_application"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"a2df481dde933157a5a309220a4de78a9f1463b5"},"commit_message":{"kind":"string","value":"Port to the Graph SDK for authentication scripts (#1510)"},"ground_truth":{"kind":"string","value":" <0>: try:\n app = await graph_client.applications_with_app_id(client_id).get()\n return app.id\n except APIError:\n return None\n async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1>: async with session.get(f\"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')\") as response:\n <2>: if response.status == 200:\n <3>: response_json = await response.json()\n <4>: return response_json[\"id\"]\n <6>: return None\n <7>: \n"},"main_code":{"kind":"string","value":" # module: scripts.auth_common\n + def get_application(graph_client: GraphServiceClient, client_id: str) -> Optional[str]:\n - def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]:\n <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n <1> async with session.get(f\"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')\") as response:\n <2> if response.status == 200:\n <3> response_json = await response.json()\n <4> return response_json[\"id\"]\n <5> \n <6> return None\n <7> \n "},"context":{"kind":"string","value":"===========unchanged ref 0===========\n at: os\n getenv(key: str, default: _T) -> Union[str, _T]\n getenv(key: str) -> Optional[str]\n \n \n===========changed ref 0===========\n # module: scripts.auth_common\n - TIMEOUT = 60\n - \n===========changed ref 1===========\n # module: scripts.auth_common\n - def get_auth_headers(credential: AsyncTokenCredential):\n - token_result = await credential.get_token(\"https://graph.microsoft.com/.default\")\n - return {\"Authorization\": f\"Bearer {token_result.token}\"}\n - \n===========changed ref 2===========\n # module: scripts.auth_init\n + def server_app_known_client_application(client_app_id: str) -> Application:\n + return Application(\n + api=ApiApplication(\n + known_client_applications=[client_app_id],\n + )\n + )\n + \n===========changed ref 3===========\n # module: scripts.auth_init\n - def create_server_app_known_client_application_payload(client_app_id: str):\n - return {\n - \"api\": {\n - 
\"knownClientApplications\": [client_app_id],\n - }\n - }\n - \n===========changed ref 4===========\n # module: scripts.auth_init\n + def server_app_initial(identifier: int) -> Application:\n + return Application(\n + display_name=f\"Azure Search OpenAI Chat Server App {identifier}\",\n + sign_in_audience=\"AzureADMyOrg\",\n + )\n + \n===========changed ref 5===========\n # module: scripts.auth_init\n - def create_server_app_initial_payload(identifier: int):\n - return {\n - \"displayName\": f\"Azure Search OpenAI Chat Server App {identifier}\",\n - \"signInAudience\": \"AzureADMyOrg\",\n - }\n - \n===========changed ref 6===========\n # module: scripts.auth_init\n + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:\n - def add_client_secret(auth_headers: Dict[str, str], object_id: str):\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\n - f\"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword\",\n - json={\"passwordCredential\": {\"displayName\": \"secret\"}},\n - ) as response:\n - response_json = await response.json()\n - if response.status == 200:\n - return response_json[\"secretText\"]\n + request_password = AddPasswordPostRequestBody(\n + password_credential=PasswordCredential(display_name=\"WebAppSecret\"),\n + )\n + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)\n + return result.secret_text\n \n - raise Exception(response_json)\n - \n===========changed ref 7===========\n # module: scripts.auth_init\n + def client_app(server_app_id: str, server_app: Application, identifier: int) -> Application:\n + return Application(\n + display_name=f\"Azure Search OpenAI Chat Client App {identifier}\",\n + sign_in_audience=\"AzureADMyOrg\",\n + web=WebApplication(\n + redirect_uris=[\"http://localhost:50505/.auth/login/aad/callback\"],\n + implicit_grant_settings=ImplicitGrantSettings(enable_id_token_issuance=True),\n + ),\n + spa=SpaApplication(redirect_uris=[\"http://localhost:50505/redirect\", \"http://localhost:5173/redirect\"]),\n + required_resource_access=[\n + RequiredResourceAccess(\n + resource_app_id=server_app_id,\n + resource_access=[\n + ResourceAccess(\n + id=server_app.api.oauth2_permission_scopes[0].id,\n + type=\"Scope\",\n + )\n + ],\n + ),\n + # Graph User.Read\n + RequiredResourceAccess(\n + resource_app_id=\"00000003-0000-0000-c000-000000000000\",\n + resource_access=[\n + ResourceAccess(id=\"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", type=\"Scope\"),\n + ],\n + ),\n + ],\n + )\n + \n===========changed ref 8===========\n # module: scripts.auth_init\n + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:\n - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]:\n - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:\n - async with session.post(\"https://graph.microsoft.com/v1.0/applications\", json=app_payload) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - response_json = await response.json()\n - object_id = response_json[\"id\"]\n - client_id = response_json[\"appId\"]\n + app = await graph_client.applications.post(request_app)\n + object_id = app.id\n + client_id = app.app_id\n \n - async with session.post(\n - \"https://graph.microsoft.com/v1.0/servicePrincipals\",\n - 
json={\"appId\": client_id, \"displayName\": app_payload[\"displayName\"]},\n - ) as response:\n - if response.status != 201:\n - raise Exception(await response.json())\n - \n + # Create a service principal\n + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name)\n + await graph_client.service_principals.post(request_principal)\n return object_id, client_id\n \n===========changed ref 9===========\n # module: scripts.auth_init\n - def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int):\n - return {\n - \"displayName\": f\"Azure Search OpenAI Chat Client App {identifier}\",\n - \"signInAudience\": \"AzureADMyOrg\",\n - \"web\": {\n - \"redirectUris\": [\"http://localhost:50505/.auth/login/aad/callback\"],\n - \"implicitGrantSettings\": {\"enableIdTokenIssuance\": True},\n - },\n - \"spa\": {\"redirectUris\": [\"http://localhost:50505/redirect\", \"http://localhost:5173/redirect\"]},\n - \"requiredResourceAccess\": [\n - # access_as_user from server app\n - {\n - \"resourceAppId\": server_app_id,\n - \"resourceAccess\": [\n - {\n - \"id\": server_app_permission_setup_payload[\"api\"][\"oauth2PermissionScopes\"][0][\"id\"],\n - \"type\": \"Scope\",\n - }\n - ],\n - },\n - # Graph User.Read\n - {\n - \"resourceAppId\": \"00000003-0000-0000-c000-000000000000\",\n - \"resourceAccess\": [{\"id\": \"e1fe6dd8-ba31-4d61-89e7-88639da4683d\", \"type\": \"Scope\"}],\n - },\n - ],\n - }\n - "}}},{"rowIdx":5896,"cells":{"path":{"kind":"string","value":"tests.test_upload/test_upload_file"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"0124725ad85726aa3f62450e16fb136a63154f69"},"commit_message":{"kind":"string","value":"Adds storageURL field to track file location (#1535)"},"ground_truth":{"kind":"string","value":"<22>: path = kwargs.get(\"file\")\n<23>: if path in self.files:\n<24>: return self.files[path]\n<25>: self.files[path] = DataLakeFileClient(path)\n<26>: return self.files[path]\n<27>: return azure.storage.filedatalake.aio.DataLakeFileClient(\n account_url=\"https://test.blob.core.windows.net/\", file_system_name=\"user-content\", file_path=args[0]\n )\n"},"main_code":{"kind":"string","value":" # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n <0> async def mock_get_directory_properties(self, *args, **kwargs):\n <1> if directory_exists:\n <2> return None\n <3> else:\n <4> raise azure.core.exceptions.ResourceNotFoundError()\n <5> \n <6> monkeypatch.setattr(DataLakeDirectoryClient, \"get_directory_properties\", mock_get_directory_properties)\n <7> \n <8> directory_created = [False]\n <9> \n<10> async def mock_create_directory(self, *args, **kwargs):\n<11> directory_created[0] = True\n<12> \n<13> monkeypatch.setattr(DataLakeDirectoryClient, \"create_directory\", mock_create_directory)\n<14> \n<15> async def mock_directory_set_access_control(self, *args, **kwargs):\n<16> assert kwargs.get(\"owner\") == \"OID_X\"\n<17> return None\n<18> \n<19> monkeypatch.setattr(DataLakeDirectoryClient, \"set_access_control\", mock_directory_set_access_control)\n<20> \n<21> def mock_directory_get_file_client(self, *args, **kwargs):\n<22> path = kwargs.get(\"file\")\n<23> if path in 
self.files:\n<24> return self.files[path]\n<25> self.files[path] = DataLakeFileClient(path)\n<26> return self.files[path]\n<27> \n<28> monkeypatch.setattr(DataLakeDirectoryClient, \"get_file_client\", mock_directory_get_file_client)\n<29> \n<30> async def mock_upload_file(self, *args, **kwargs):\n<31> assert kwargs.get(\"overwrite\") is True\n<32> assert kwargs.get(\"metadata\") == {\"UploadedBy\": \"OID_X\"}\n<33> return None\n<34> \n<35> monkeypatch.setattr(DataLakeFileClient, \"upload_data\","},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n # offset: 1\n \n async def mock_create_client(self, *args, **kwargs):\n # From https://platform.openai.com/docs/api-reference/embeddings/create\n return MockClient(\n embeddings_client=MockEmbeddingsClient(\n create_embedding_response=CreateEmbeddingResponse(\n object=\"list\",\n data=[\n Embedding(\n embedding=[\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ],\n index=0,\n object=\"embedding\",\n )\n ],\n model=\"text-embedding-ada-002\",\n usage=Usage(prompt_tokens=8, total_tokens=8),\n )\n )\n )\n \n documents_uploaded = []\n \n async def mock_upload_documents(self, documents):\n documents_uploaded.extend(documents)\n \n monkeypatch.setattr(SearchClient, \"upload_documents\", mock_upload_documents)\n monkeypatch.setattr(AzureOpenAIEmbeddingService, \"create_client\", mock_create_client)\n \n response = await auth_client.post(\n \"/upload\",\n headers={\"Authorization\": \"Bearer test\"},\n files={\"file\": FileStorage(BytesIO(b\"foo;bar\"), filename=\"a.txt\")},\n )\n message = (await response.get_json())[\"message\"]\n assert message == \"File uploaded successfully\"\n assert response.status_code == 200\n assert len(documents_uploaded) == 1\n assert documents_uploaded[0][\"id\"] == \"file-a_txt-612E7478747B276F696473273A205B274F49445F58275D7D-\n===========below chunk 1===========\n # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n # offset: 2\n E7478747B276F696473273A205B274F49445F58275D7D-page-0\"\n assert documents_uploaded[0][\"sourcepage\"] == \"a.txt\"\n assert documents_uploaded[0][\"sourcefile\"] == \"a.txt\"\n assert documents_uploaded[0][\"embedding\"] == [0.0023064255, -0.009327292, -0.0028842222]\n assert documents_uploaded[0][\"category\"] is None\n assert documents_uploaded[0][\"oids\"] == [\"OID_X\"]\n assert directory_created[0] == (not directory_exists)\n \n \n===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.mark.structures.MarkGenerator\n skip: _SkipMarkDecorator\n \n skipif: _SkipifMarkDecorator\n \n xfail: _XfailMarkDecorator\n \n parametrize: _ParametrizeMarkDecorator\n \n usefixtures: _UsefixturesMarkDecorator\n \n filterwarnings: _FilterwarningsMarkDecorator\n \n at: io\n BytesIO(initial_bytes: bytes=...)\n \n at: tests.mocks\n MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse)\n \n MockClient(embeddings_client)\n \n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n 
get(key: _KT) -> Optional[_VT_co]\n \n "}}},{"rowIdx":5897,"cells":{"path":{"kind":"string","value":"tests.test_upload/test_delete_uploaded"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"0124725ad85726aa3f62450e16fb136a63154f69"},"commit_message":{"kind":"string","value":"Adds storageURL field to track file location (#1535)"},"ground_truth":{"kind":"string","value":" <4>: \n def mock_directory_get_file_client(self, *args, **kwargs):\n return azure.storage.filedatalake.aio.DataLakeFileClient(\n account_url=\"https://test.blob.core.windows.net/\", file_system_name=\"user-content\", file_path=args[0]\n )\n \n monkeypatch.setattr(DataLakeDirectoryClient, \"get_file_client\", mock_directory_get_file_client)\n"},"main_code":{"kind":"string","value":" # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n <0> async def mock_delete_file(self):\n <1> return None\n <2> \n <3> monkeypatch.setattr(DataLakeFileClient, \"delete_file\", mock_delete_file)\n <4> \n <5> class AsyncSearchResultsIterator:\n <6> def __init__(self):\n <7> self.results = [\n <8> {\n <9> \"sourcepage\": \"a.txt\",\n<10> \"sourcefile\": \"a.txt\",\n<11> \"content\": \"This is a test document.\",\n<12> \"embedding\": [],\n<13> \"category\": None,\n<14> \"id\": \"file-a_txt-7465737420646F63756D656E742E706466\",\n<15> \"oids\": [\"OID_X\"],\n<16> \"@search.score\": 0.03279569745063782,\n<17> \"@search.reranker_score\": 3.4577205181121826,\n<18> },\n<19> {\n<20> \"sourcepage\": \"a.txt\",\n<21> \"sourcefile\": \"a.txt\",\n<22> \"content\": \"This is a test document.\",\n<23> \"embedding\": [],\n<24> \"category\": None,\n<25> \"id\": \"file-a_txt-7465737420646F63756D656E742E706422\",\n<26> \"oids\": [],\n<27> \"@search.score\": 0.03279569745063782,\n<28> \"@search.reranker_score\": 3.4577205181121826,\n<29> },\n<30> {\n<31> \"sourcepage\": \"a.txt\",\n<32> \"sourcefile\": \"a.txt\",\n<33> \"content\": \"This is a test document.\",\n<34> \"embedding\": [],\n<35> \"category\": None,\n<36> \"id\": \"file-a_txt-7465737420646F63756D656E742E"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n # offset: 1\n \"oids\": [\"OID_X\", \"OID_Y\"],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n },\n ]\n \n def __aiter__(self):\n return self\n \n async def __anext__(self):\n if len(self.results) == 0:\n raise StopAsyncIteration\n return self.results.pop()\n \n async def get_count(self):\n return len(self.results)\n \n search_results = AsyncSearchResultsIterator()\n \n searched_filters = []\n \n async def mock_search(self, *args, **kwargs):\n self.filter = kwargs.get(\"filter\")\n searched_filters.append(self.filter)\n return search_results\n \n monkeypatch.setattr(SearchClient, \"search\", mock_search)\n \n deleted_documents = []\n \n async def mock_delete_documents(self, documents):\n deleted_documents.extend(documents)\n return documents\n \n monkeypatch.setattr(SearchClient, \"delete_documents\", mock_delete_documents)\n \n response = await auth_client.post(\n \"/delete_uploaded\", headers={\"Authorization\": \"Bearer test\"}, json={\"filename\": \"a.txt\"}\n )\n assert response.status_code == 200\n assert len(searched_filters) == 2, 
\"It should have searched twice (with no results on second try)\"\n assert searched_filters[0] == \"sourcefile eq 'a.txt'\"\n assert len(deleted_documents) == 1, \"It should have only deleted the document solely owned by OID_X\"\n assert deleted_documents[0][\"id\"] == \"file-a_txt-7465737420646F63756D656E742E706466\"\n \n \n===========unchanged ref 0===========\n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n get(key: _KT) -> Optional[_VT_co]\n \n \n===========changed ref 0===========\n # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n async def mock_get_directory_properties(self, *args, **kwargs):\n if directory_exists:\n return None\n else:\n raise azure.core.exceptions.ResourceNotFoundError()\n \n monkeypatch.setattr(DataLakeDirectoryClient, \"get_directory_properties\", mock_get_directory_properties)\n \n directory_created = [False]\n \n async def mock_create_directory(self, *args, **kwargs):\n directory_created[0] = True\n \n monkeypatch.setattr(DataLakeDirectoryClient, \"create_directory\", mock_create_directory)\n \n async def mock_directory_set_access_control(self, *args, **kwargs):\n assert kwargs.get(\"owner\") == \"OID_X\"\n return None\n \n monkeypatch.setattr(DataLakeDirectoryClient, \"set_access_control\", mock_directory_set_access_control)\n \n def mock_directory_get_file_client(self, *args, **kwargs):\n - path = kwargs.get(\"file\")\n - if path in self.files:\n - return self.files[path]\n - self.files[path] = DataLakeFileClient(path)\n - return self.files[path]\n + return azure.storage.filedatalake.aio.DataLakeFileClient(\n + account_url=\"https://test.blob.core.windows.net/\", file_system_name=\"user-content\", file_path=args[0]\n + )\n \n monkeypatch.setattr(DataLakeDirectoryClient, \"get_file_client\", mock_directory_get_file_client)\n \n async def mock_upload_file(self, *args, **kwargs):\n assert kwargs.get(\"overwrite\") is True\n assert kwargs.get(\"metadata\") == {\"Uploaded\n===========changed ref 1===========\n # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n # offset: 1\n , *args, **kwargs):\n assert kwargs.get(\"overwrite\") is True\n assert kwargs.get(\"metadata\") == {\"UploadedBy\": \"OID_X\"}\n return None\n \n monkeypatch.setattr(DataLakeFileClient, \"upload_data\", mock_upload_file)\n \n async def mock_create_client(self, *args, **kwargs):\n # From https://platform.openai.com/docs/api-reference/embeddings/create\n return MockClient(\n embeddings_client=MockEmbeddingsClient(\n create_embedding_response=CreateEmbeddingResponse(\n object=\"list\",\n data=[\n Embedding(\n embedding=[\n 0.0023064255,\n -0.009327292,\n -0.0028842222,\n ],\n index=0,\n object=\"embedding\",\n )\n ],\n model=\"text-embedding-ada-002\",\n usage=Usage(prompt_tokens=8, total_tokens=8),\n )\n )\n )\n \n documents_uploaded = []\n \n async def mock_upload_documents(self, documents):\n documents_uploaded.extend(documents)\n \n monkeypatch.setattr(SearchClient, \"upload_documents\", mock_upload_documents)\n monkeypatch.setattr(AzureOpenAIEmbeddingService, \"create_client\", mock_create_client)\n \n response = await 
auth_client.post(\n \"/upload\",\n headers={\"Authorization\": \"Bearer test\"},\n files={\"file\": FileStorage(BytesIO(b\"foo;bar\"), filename=\"a.txt\")\n===========changed ref 2===========\n # module: tests.test_upload\n # parameterize for directory existing or not\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\"directory_exists\", [True, False])\n async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists):\n # offset: 2\n )\n message = (await response.get_json())[\"message\"]\n assert message == \"File uploaded successfully\"\n assert response.status_code == 200\n assert len(documents_uploaded) == 1\n assert documents_uploaded[0][\"id\"] == \"file-a_txt-612E7478747B276F696473273A205B274F49445F58275D7D-page-0\"\n assert documents_uploaded[0][\"sourcepage\"] == \"a.txt\"\n assert documents_uploaded[0][\"sourcefile\"] == \"a.txt\"\n assert documents_uploaded[0][\"embedding\"] == [0.0023064255, -0.009327292, -0.0028842222]\n assert documents_uploaded[0][\"category\"] is None\n assert documents_uploaded[0][\"oids\"] == [\"OID_X\"]\n assert directory_created[0] == (not directory_exists)\n "}}},{"rowIdx":5898,"cells":{"path":{"kind":"string","value":"tests.test_blob_manager/test_upload_and_remove"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"0124725ad85726aa3f62450e16fb136a63154f69"},"commit_message":{"kind":"string","value":"Adds storageURL field to track file location (#1535)"},"ground_truth":{"kind":"string","value":"<12>: return azure.storage.blob.aio.BlobClient.from_blob_url(\n \"https://test.blob.core.windows.net/test/test.pdf\", credential=MockAzureCredential()\n )\n return True\n<17>: assert f.url == \"https://test.blob.core.windows.net/test/test.pdf\"\n"},"main_code":{"kind":"string","value":" # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove(monkeypatch, mock_env, blob_manager):\n <0> with NamedTemporaryFile(suffix=\".pdf\") as temp_file:\n <1> f = File(temp_file.file)\n <2> filename = os.path.basename(f.content.name)\n <3> \n <4> # Set up mocks used by upload_blob\n <5> async def mock_exists(*args, **kwargs):\n <6> return True\n <7> \n <8> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.exists\", mock_exists)\n <9> \n<10> async def mock_upload_blob(self, name, *args, **kwargs):\n<11> assert name == filename\n<12> return True\n<13> \n<14> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.upload_blob\", mock_upload_blob)\n<15> \n<16> await blob_manager.upload_blob(f)\n<17> \n<18> # Set up mocks used by remove_blob\n<19> def mock_list_blob_names(*args, **kwargs):\n<20> assert kwargs.get(\"name_starts_with\") == filename.split(\".pdf\")[0]\n<21> \n<22> class AsyncBlobItemsIterator:\n<23> def __init__(self, file):\n<24> self.files = [file, \"dontdelete.pdf\"]\n<25> \n<26> def __aiter__(self):\n<27> return self\n<28> \n<29> async def __anext__(self):\n<30> if self.files:\n<31> return self.files.pop()\n<32> raise StopAsyncIteration\n<33> \n<34> return AsyncBlobItemsIterator(filename)\n<35> \n<36> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.list_blob_names\", mock_list_blob_names)\n<37> \n<38> async def mock_delete_blob(self, name, *args, **kwargs):\n<39> assert name == filename\n<40> return True\n<41> \n<42> 
mon"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove(monkeypatch, mock_env, blob_manager):\n # offset: 1\n \n await blob_manager.remove_blob(f.content.name)\n \n \n===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.mark.structures.MarkGenerator\n skip: _SkipMarkDecorator\n \n skipif: _SkipifMarkDecorator\n \n xfail: _XfailMarkDecorator\n \n parametrize: _ParametrizeMarkDecorator\n \n usefixtures: _UsefixturesMarkDecorator\n \n filterwarnings: _FilterwarningsMarkDecorator\n \n at: _pytest.monkeypatch\n monkeypatch() -> Generator[\"MonkeyPatch\", None, None]\n \n at: os.path\n basename(p: _PathLike[AnyStr]) -> AnyStr\n basename(p: AnyStr) -> AnyStr\n \n at: sys\n version_info: _version_info\n \n at: sys._version_info\n major: int\n \n minor: int\n \n micro: int\n \n releaselevel: str\n \n serial: int\n \n \n===========unchanged ref 1===========\n at: tempfile\n NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any]\n NamedTemporaryFile(mode: Literal[\"r\", \"w\", \"a\", \"x\", \"r+\", \"w+\", \"a+\", \"x+\", \"rt\", \"wt\", \"at\", \"xt\", \"r+t\", \"w+t\", \"a+t\", \"x+t\"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str]\n NamedTemporaryFile(mode: Literal[\"rb\", \"wb\", \"ab\", \"xb\", \"r+b\", \"w+b\", \"a+b\", \"x+b\"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
-> IO[bytes]\n \n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n get(key: _KT) -> Optional[_VT_co]\n \n \n===========changed ref 0===========\n # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n async def mock_delete_file(self):\n return None\n \n monkeypatch.setattr(DataLakeFileClient, \"delete_file\", mock_delete_file)\n + \n + def mock_directory_get_file_client(self, *args, **kwargs):\n + return azure.storage.filedatalake.aio.DataLakeFileClient(\n + account_url=\"https://test.blob.core.windows.net/\", file_system_name=\"user-content\", file_path=args[0]\n + )\n + \n + monkeypatch.setattr(DataLakeDirectoryClient, \"get_file_client\", mock_directory_get_file_client)\n \n class AsyncSearchResultsIterator:\n def __init__(self):\n self.results = [\n {\n \"sourcepage\": \"a.txt\",\n \"sourcefile\": \"a.txt\",\n \"content\": \"This is a test document.\",\n \"embedding\": [],\n \"category\": None,\n \"id\": \"file-a_txt-7465737420646F63756D656E742E706466\",\n \"oids\": [\"OID_X\"],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n },\n {\n \"sourcepage\": \"a.txt\",\n \"sourcefile\": \"a.txt\",\n \"content\": \"This is a test document.\",\n \"embedding\": [],\n \"category\": None,\n \"id\": \"file-a_txt-7465737420646F63756D656E742E706422\",\n \"oids\": [],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n \n===========changed ref 1===========\n # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n # offset: 1\n 9745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n },\n {\n \"sourcepage\": \"a.txt\",\n \"sourcefile\": \"a.txt\",\n \"content\": \"This is a test document.\",\n \"embedding\": [],\n \"category\": None,\n \"id\": \"file-a_txt-7465737420646F63756D656E742E706433\",\n \"oids\": [\"OID_X\", \"OID_Y\"],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n },\n ]\n \n def __aiter__(self):\n return self\n \n async def __anext__(self):\n if len(self.results) == 0:\n raise StopAsyncIteration\n return self.results.pop()\n \n async def get_count(self):\n return len(self.results)\n \n search_results = AsyncSearchResultsIterator()\n \n searched_filters = []\n \n async def mock_search(self, *args, **kwargs):\n self.filter = kwargs.get(\"filter\")\n searched_filters.append(self.filter)\n return search_results\n \n monkeypatch.setattr(SearchClient, \"search\", mock_search)\n \n deleted_documents = []\n \n async def mock_delete_documents(self, documents):\n deleted_documents.extend(documents)\n return documents\n \n monkeypatch.setattr(SearchClient, \"delete_documents\", mock_delete_documents)\n \n response = await auth_client.post(\n \"/delete_uploaded\", headers={\"Authorization\n===========changed ref 2===========\n # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n # offset: 2\n Bearer test\"}, json={\"filename\": \"a.txt\"}\n )\n assert response.status_code == 200\n assert len(searched_filters) == 2, \"It should have searched twice (with no results on second try)\"\n assert searched_filters[0] == \"sourcefile eq 'a.txt'\"\n assert len(deleted_documents) == 1, \"It should have only deleted the document solely owned by OID_X\"\n assert 
deleted_documents[0][\"id\"] == \"file-a_txt-7465737420646F63756D656E742E706466\"\n "}}},{"rowIdx":5899,"cells":{"path":{"kind":"string","value":"tests.test_blob_manager/test_upload_and_remove_all"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"Azure-Samples~azure-search-openai-demo"},"commit_hash":{"kind":"string","value":"0124725ad85726aa3f62450e16fb136a63154f69"},"commit_message":{"kind":"string","value":"Adds storageURL field to track file location (#1535)"},"ground_truth":{"kind":"string","value":"<12>: return azure.storage.blob.aio.BlobClient.from_blob_url(\n \"https://test.blob.core.windows.net/test/test.pdf\", credential=MockAzureCredential()\n )\n return True\n<17>: assert f.url == \"https://test.blob.core.windows.net/test/test.pdf\"\n"},"main_code":{"kind":"string","value":" # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):\n <0> with NamedTemporaryFile(suffix=\".pdf\") as temp_file:\n <1> f = File(temp_file.file)\n <2> filename = os.path.basename(f.content.name)\n <3> \n <4> # Set up mocks used by upload_blob\n <5> async def mock_exists(*args, **kwargs):\n <6> return True\n <7> \n <8> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.exists\", mock_exists)\n <9> \n<10> async def mock_upload_blob(self, name, *args, **kwargs):\n<11> assert name == filename\n<12> return True\n<13> \n<14> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.upload_blob\", mock_upload_blob)\n<15> \n<16> await blob_manager.upload_blob(f)\n<17> \n<18> # Set up mocks used by remove_blob\n<19> def mock_list_blob_names(*args, **kwargs):\n<20> assert kwargs.get(\"name_starts_with\") is None\n<21> \n<22> class AsyncBlobItemsIterator:\n<23> def __init__(self, file):\n<24> self.files = [file]\n<25> \n<26> def __aiter__(self):\n<27> return self\n<28> \n<29> async def __anext__(self):\n<30> if self.files:\n<31> return self.files.pop()\n<32> raise StopAsyncIteration\n<33> \n<34> return AsyncBlobItemsIterator(filename)\n<35> \n<36> monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.list_blob_names\", mock_list_blob_names)\n<37> \n<38> async def mock_delete_blob(self, name, *args, **kwargs):\n<39> assert name == filename\n<40> return True\n<41> \n<42> monkeypatch.setattr(\"azure.storage.blob.aio"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):\n # offset: 1\n \n await blob_manager.remove_blob()\n \n \n===========unchanged ref 0===========\n at: _pytest.mark.structures\n MARK_GEN = MarkGenerator(_ispytest=True)\n \n at: _pytest.mark.structures.MarkGenerator\n skipif: _SkipifMarkDecorator\n \n at: os.path\n basename(p: _PathLike[AnyStr]) -> AnyStr\n basename(p: AnyStr) -> AnyStr\n \n at: sys\n version_info: _version_info\n \n at: sys._version_info\n minor: int\n \n at: tempfile\n NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
-> IO[Any]\n NamedTemporaryFile(mode: Literal[\"r\", \"w\", \"a\", \"x\", \"r+\", \"w+\", \"a+\", \"x+\", \"rt\", \"wt\", \"at\", \"xt\", \"r+t\", \"w+t\", \"a+t\", \"x+t\"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str]\n NamedTemporaryFile(mode: Literal[\"rb\", \"wb\", \"ab\", \"xb\", \"r+b\", \"w+b\", \"a+b\", \"x+b\"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[bytes]\n \n at: tests.test_blob_manager.test_upload_and_remove\n f = File(temp_file.file)\n \n \n===========unchanged ref 1===========\n at: typing.Mapping\n get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]\n get(key: _KT) -> Optional[_VT_co]\n \n \n===========changed ref 0===========\n # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove(monkeypatch, mock_env, blob_manager):\n with NamedTemporaryFile(suffix=\".pdf\") as temp_file:\n f = File(temp_file.file)\n filename = os.path.basename(f.content.name)\n \n # Set up mocks used by upload_blob\n async def mock_exists(*args, **kwargs):\n return True\n \n monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.exists\", mock_exists)\n \n async def mock_upload_blob(self, name, *args, **kwargs):\n assert name == filename\n + return azure.storage.blob.aio.BlobClient.from_blob_url(\n + \"https://test.blob.core.windows.net/test/test.pdf\", credential=MockAzureCredential()\n + )\n - return True\n \n monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.upload_blob\", mock_upload_blob)\n \n await blob_manager.upload_blob(f)\n + assert f.url == \"https://test.blob.core.windows.net/test/test.pdf\"\n \n # Set up mocks used by remove_blob\n def mock_list_blob_names(*args, **kwargs):\n assert kwargs.get(\"name_starts_with\") == filename.split(\".pdf\")[0]\n \n class AsyncBlobItemsIterator:\n def __init__(self, file):\n self.files = [file, \"dontdelete.pdf\"]\n \n def __aiter__(self):\n return self\n \n async def __anext__(self):\n if self.files:\n return self.files.pop()\n raise StopAsyncIteration\n \n return AsyncBlobItemsIterator(filename)\n \n monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.list_blob_names\", mock_list\n===========changed ref 1===========\n # module: tests.test_blob_manager\n @pytest.mark.asyncio\n @pytest.mark.skipif(sys.version_info.minor < 10, reason=\"requires Python 3.10 or higher\")\n async def test_upload_and_remove(monkeypatch, mock_env, blob_manager):\n # offset: 1\n \n monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.list_blob_names\", mock_list_blob_names)\n \n async def mock_delete_blob(self, name, *args, **kwargs):\n assert name == filename\n return True\n \n monkeypatch.setattr(\"azure.storage.blob.aio.ContainerClient.delete_blob\", mock_delete_blob)\n \n await blob_manager.remove_blob(f.content.name)\n \n===========changed ref 2===========\n # module: tests.test_upload\n @pytest.mark.asyncio\n async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client):\n async def mock_delete_file(self):\n return None\n \n monkeypatch.setattr(DataLakeFileClient, \"delete_file\", mock_delete_file)\n + \n 
+ def mock_directory_get_file_client(self, *args, **kwargs):\n + return azure.storage.filedatalake.aio.DataLakeFileClient(\n + account_url=\"https://test.blob.core.windows.net/\", file_system_name=\"user-content\", file_path=args[0]\n + )\n + \n + monkeypatch.setattr(DataLakeDirectoryClient, \"get_file_client\", mock_directory_get_file_client)\n \n class AsyncSearchResultsIterator:\n def __init__(self):\n self.results = [\n {\n \"sourcepage\": \"a.txt\",\n \"sourcefile\": \"a.txt\",\n \"content\": \"This is a test document.\",\n \"embedding\": [],\n \"category\": None,\n \"id\": \"file-a_txt-7465737420646F63756D656E742E706466\",\n \"oids\": [\"OID_X\"],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n },\n {\n \"sourcepage\": \"a.txt\",\n \"sourcefile\": \"a.txt\",\n \"content\": \"This is a test document.\",\n \"embedding\": [],\n \"category\": None,\n \"id\": \"file-a_txt-7465737420646F63756D656E742E706422\",\n \"oids\": [],\n \"@search.score\": 0.03279569745063782,\n \"@search.reranker_score\": 3.4577205181121826,\n "}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":58,"numItemsPerPage":100,"numTotalItems":7800,"offset":5800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzkxODEwNywic3ViIjoiL2RhdGFzZXRzL2tyYWFsZmFyL0NvZWRpdG9yLXByb2Nlc3NlZC1kZW1vMiIsImV4cCI6MTc1NzkyMTcwNywiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.5HrAP8tjd8ZBBz9NCDJ7vpsHsk2d7CtZvsTN0Bl0T40_P5XS0xKfkWFxkzdfsaOm5AryHaFwsQ4Pq4R09cUdDw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset schema (column name, type, and value statistics as shown in the viewer):

    column          type    value stats
    path            string  lengths 9 to 117
    type            string  2 distinct values
    project         string  10 distinct values
    commit_hash     string  length 40 (fixed)
    commit_message  string  lengths 1 to 137
    ground_truth    string  lengths 0 to 2.74k
    main_code       string  lengths 102 to 3.37k
    context         string  lengths 0 to 14.7k
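Before the rows themselves, it may help to see how records of this schema are consumed. A minimal sketch, assuming this page mirrors a Hugging Face dataset; the dataset ID below is a placeholder, not the real one:

```python
# Minimal sketch: iterating records with the schema above using the
# `datasets` library. The dataset ID is a placeholder (hypothetical).
from datasets import load_dataset

ds = load_dataset("some-org/code-edit-records", split="train")  # hypothetical ID
row = ds[0]
# Each record carries the eight string columns listed in the table above.
for field in ("path", "type", "project", "commit_hash",
              "commit_message", "ground_truth", "main_code", "context"):
    print(f"{field}: {str(row[field])[:80]!r}")
```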
Row 1

path:           scripts.prepdocslib.embeddings/AzureOpenAIEmbeddingService.__init__
type:           Modified
project:        Azure-Samples~azure-search-openai-demo
commit_hash:    87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:

<0>:<add> super().__init__(open_ai_model_name, disable_batch)
<del> super().__init__(open_ai_model_name, disable_batch, verbose)
main_code:

# module: scripts.prepdocslib.embeddings
class AzureOpenAIEmbeddingService(OpenAIEmbeddings):
    def __init__(
        self,
        open_ai_service: str,
        open_ai_deployment: str,
        open_ai_model_name: str,
        credential: Union[AsyncTokenCredential, AzureKeyCredential],
        disable_batch: bool = False,
-       verbose: bool = False,
    ):
<0>     super().__init__(open_ai_model_name, disable_batch, verbose)
<1>     self.open_ai_service = open_ai_service
<2>     self.open_ai_deployment = open_ai_deployment
<3>     self.credential = credential
<4>
context:

===========changed ref 0===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddings(ABC):
+   def __init__(self, open_ai_model_name: str, disable_batch: bool = False):
-   def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False):
        self.open_ai_model_name = open_ai_model_name
        self.disable_batch = disable_batch
-       self.verbose = verbose

===========changed ref 1===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddings(ABC):
    def before_retry_sleep(self, retry_state):
-       if self.verbose:
+       logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
-           print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")

===========changed ref 2===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddings(ABC):
    def create_embedding_batch(self, texts: List[str]) -> List[List[float]]:
        batches = self.split_text_into_batches(texts)
        embeddings = []
        client = await self.create_client()
        for batch in batches:
            async for attempt in AsyncRetrying(
                retry=retry_if_exception_type(RateLimitError),
                wait=wait_random_exponential(min=15, max=60),
                stop=stop_after_attempt(15),
                before_sleep=self.before_retry_sleep,
            ):
                with attempt:
                    emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)
                    embeddings.extend([data.embedding for data in emb_response.data])
-                   if self.verbose:
+                   logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
-                       print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
        return embeddings

===========changed ref 3===========
# module: scripts.prepdocslib.parser
class Parser(ABC):
-   def __init__(
-       self,
-       verbose: bool = False,
-   ):
-       self.verbose = verbose
-

===========changed ref 4===========
# module: scripts.prepdocslib.blobmanager
+ logger = logging.getLogger("ingester")

===========changed ref 5===========
# module: scripts.prepdocslib.listfilestrategy
+ logger = logging.getLogger("ingester")

===========changed ref 6===========
# module: scripts.prepdocs
- def is_key_empty(key):
-     return key is None or len(key.strip()) == 0
-

===========changed ref 7===========
# module: scripts.prepdocslib.listfilestrategy
class LocalListFileStrategy(ListFileStrategy):
+   def __init__(self, path_pattern: str):
-   def __init__(self, path_pattern: str, verbose: bool = False):
        self.path_pattern = path_pattern
-       self.verbose = verbose

===========changed ref 8===========
# module: scripts.prepdocslib.textsplitter
class SimpleTextSplitter(TextSplitter):
+   def __init__(self, max_object_length: int = 1000):
-   def __init__(self, max_object_length: int = 1000, verbose: bool = False):
        self.max_object_length = max_object_length
-       self.verbose = verbose

===========changed ref 9===========
# module: scripts.prepdocslib.strategy
class SearchInfo:
-   def __init__(
-       self,
-       endpoint: str,
-       credential: Union[AsyncTokenCredential, AzureKeyCredential],
-       index_name: str,
-       verbose: bool = False,
-   ):
+   def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str):
        self.endpoint = endpoint
        self.credential = credential
        self.index_name = index_name
-       self.verbose = verbose

===========changed ref 10===========
# module: scripts.prepdocs
+ def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]:
+     """Remove leading and trailing whitespace from a key if it exists.
+     If the key is empty, return None."""
+     if key is not None and key.strip() != "":
+         return key.strip()
+     return None
+

===========changed ref 11===========
# module: scripts.prepdocslib.listfilestrategy
class ADLSGen2ListFileStrategy(ListFileStrategy):
    def __init__(
        self,
        data_lake_storage_account: str,
        data_lake_filesystem: str,
        data_lake_path: str,
        credential: Union[AsyncTokenCredential, str],
-       verbose: bool = False,
    ):
        self.data_lake_storage_account = data_lake_storage_account
        self.data_lake_filesystem = data_lake_filesystem
        self.data_lake_path = data_lake_path
        self.credential = credential
-       self.verbose = verbose

===========changed ref 12===========
# module: scripts.prepdocslib.blobmanager
class BlobManager:
    def __init__(
        self,
        endpoint: str,
        container: str,
        account: str,
        credential: Union[AsyncTokenCredential, str],
        resourceGroup: str,
        subscriptionId: str,
        store_page_images: bool = False,
-       verbose: bool = False,
    ):
        self.endpoint = endpoint
        self.credential = credential
        self.account = account
        self.container = container
        self.store_page_images = store_page_images
-       self.verbose = verbose
        self.resourceGroup = resourceGroup
        self.subscriptionId = subscriptionId
        self.user_delegation_key: Optional[UserDelegationKey] = None

===========changed ref 13===========
# module: scripts.prepdocs
+ def setup_blob_manager(
+     azure_credential: AsyncTokenCredential,
+     storage_account: str,
+     storage_container: str,
+     storage_resource_group: str,
+     subscription_id: str,
+     search_images: bool,
+     storage_key: Union[str, None] = None,
+ ):
+     storage_creds: Union[AsyncTokenCredential, str] = azure_credential if storage_key is None else storage_key
+     return BlobManager(
+         endpoint=f"https://{storage_account}.blob.core.windows.net",
+         container=storage_container,
+         account=storage_account,
+         credential=storage_creds,
+         resourceGroup=storage_resource_group,
+         subscriptionId=subscription_id,
+         store_page_images=search_images,
+     )
+

===========changed ref 14===========
# module: tests.test_blob_manager
@pytest.fixture
def blob_manager(monkeypatch):
    return BlobManager(
        endpoint=f"https://{os.environ['AZURE_STORAGE_ACCOUNT']}.blob.core.windows.net",
        credential=MockAzureCredential(),
        container=os.environ["AZURE_STORAGE_CONTAINER"],
-       verbose=True,
        account=os.environ["AZURE_STORAGE_ACCOUNT"],
        resourceGroup=os.environ["AZURE_STORAGE_RESOURCE_GROUP"],
        subscriptionId=os.environ["AZURE_SUBSCRIPTION_ID"],
    )

===========changed ref 15===========
# module: scripts.prepdocs
+ def setup_image_embeddings_service(
+     azure_credential: AsyncTokenCredential, vision_endpoint: Union[str, None], search_images: bool
+ ) -> Union[ImageEmbeddings, None]:
+     image_embeddings_service: Optional[ImageEmbeddings] = None
+     if search_images:
+         if vision_endpoint is None:
+             raise ValueError("A computer vision endpoint is required when GPT-4-vision is enabled.")
+         image_embeddings_service = ImageEmbeddings(
+             endpoint=vision_endpoint,
+             token_provider=get_bearer_token_provider(azure_credential, "https://cognitiveservices.azure.com/.default"),
+         )
+     return image_embeddings_service
+
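The `<N>` markers in main_code number the lines the edit may touch, and ground_truth patches them with `<add>`/`<del>` directives, as in the row above where marker `<0>` loses its `verbose` argument. Below is a minimal sketch of applying such a patch; the one-add-one-del semantics assumed here are inferred from this row, not from any documentation, and real ground_truth values can also be empty (the schema shows lengths starting at 0), which the loop treats as a no-op:

```python
import re

# Minimal sketch of applying a ground_truth patch to the numbered lines of
# main_code. Assumption (inferred, not documented): "<N>:" selects marker N,
# "<add>" gives the new line and "<del>" the line it replaces.
def apply_patch(marker_lines: dict[int, str], ground_truth: str) -> dict[int, str]:
    patched = dict(marker_lines)
    for match in re.finditer(r"<(\d+)>:<add>(.*?)<del>(.*?)(?=<\d+>:|$)", ground_truth, re.S):
        n = int(match.group(1))
        added, deleted = match.group(2).strip(), match.group(3).strip()
        assert patched[n] == deleted, "the <del> text should match the current line"
        patched[n] = added
    return patched

lines = {0: "super().__init__(open_ai_model_name, disable_batch, verbose)"}
gt = ("<0>:<add> super().__init__(open_ai_model_name, disable_batch) "
      "<del> super().__init__(open_ai_model_name, disable_batch, verbose)")
print(apply_patch(lines, gt)[0])  # -> super().__init__(open_ai_model_name, disable_batch)
```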
Row 2

path:           scripts.prepdocslib.embeddings/OpenAIEmbeddingService.__init__
type:           Modified
project:        Azure-Samples~azure-search-openai-demo
commit_hash:    87f2b9d9fd554d29f16aead4269be883e8927bb5
commit_message: Refactoring of prepdocs for easier integration with user upload feature (#1407)

ground_truth:

<0>:<add> super().__init__(open_ai_model_name, disable_batch)
<del> super().__init__(open_ai_model_name, disable_batch, verbose)
main_code:

# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddingService(OpenAIEmbeddings):
    def __init__(
-       self,
-       open_ai_model_name: str,
-       credential: str,
-       organization: Optional[str] = None,
-       disable_batch: bool = False,
-       verbose: bool = False,
+       self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False
    ):
<0>     super().__init__(open_ai_model_name, disable_batch, verbose)
<1>     self.credential = credential
<2>     self.organization = organization
<3>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, - verbose: bool = False, ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) - if self.verbose: + logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") - print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
===========changed ref 4=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 7=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 9=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 10=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 11=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 12=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
===========changed ref 13=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool = False, - verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.account = account self.container = container self.store_page_images = store_page_images - self.verbose = verbose self.resourceGroup = resourceGroup self.subscriptionId = subscriptionId self.user_delegation_key: Optional[UserDelegationKey] = None
===========changed ref 14=========== # module: scripts.prepdocs + def setup_blob_manager( + azure_credential: AsyncTokenCredential, + storage_account: str, + storage_container: str, + storage_resource_group: str, + subscription_id: str, + search_images: bool, + storage_key: Union[str, None] = None, + ): + storage_creds: Union[AsyncTokenCredential, str] = azure_credential if storage_key is None else storage_key + return BlobManager( + endpoint=f"https://{storage_account}.blob.core.windows.net", + container=storage_container, + account=storage_account, + credential=storage_creds, + resourceGroup=storage_resource_group, + subscriptionId=subscription_id, + store_page_images=search_images, + ) +
===========changed ref 15=========== # module: tests.test_blob_manager @pytest.fixture def blob_manager(monkeypatch): return BlobManager( endpoint=f"https://{os.environ['AZURE_STORAGE_ACCOUNT']}.blob.core.windows.net", credential=MockAzureCredential(), container=os.environ["AZURE_STORAGE_CONTAINER"], - verbose=True, account=os.environ["AZURE_STORAGE_ACCOUNT"], resourceGroup=os.environ["AZURE_STORAGE_RESOURCE_GROUP"], subscriptionId=os.environ["AZURE_SUBSCRIPTION_ID"], )
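With the verbose parameter removed, the two embedding services are constructed with the slimmer signatures above. A hedged sketch of both constructors as they now stand; the model name, key values, and service names are placeholders rather than values taken from this commit:

from azure.core.credentials import AzureKeyCredential
from scripts.prepdocslib.embeddings import AzureOpenAIEmbeddingService, OpenAIEmbeddingService

# openai.com-hosted service: credential is a plain API key string.
openai_embeddings = OpenAIEmbeddingService(
    open_ai_model_name="text-embedding-ada-002",  # placeholder model name
    credential="OPENAI_API_KEY_PLACEHOLDER",
    organization=None,
    disable_batch=False,
)

# Azure-hosted service: credential is a token or key credential object.
azure_embeddings = AzureOpenAIEmbeddingService(
    open_ai_service="myopenai",       # placeholder Azure OpenAI service name
    open_ai_deployment="embedding",   # placeholder deployment name
    open_ai_model_name="text-embedding-ada-002",
    credential=AzureKeyCredential("AZURE_OPENAI_KEY_PLACEHOLDER"),
    disable_batch=False,
)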
scripts.prepdocslib.embeddings/ImageEmbeddings.__init__
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<2>:<del> self.verbose = verbose
# module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): <0> self.token_provider = token_provider <1> self.endpoint = endpoint <2> self.verbose = verbose <3>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, - verbose: bool = False, ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) - if self.verbose: + logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") - print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
===========changed ref 5=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 8=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 9=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 10=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 11=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 12=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 13=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
===========changed ref 14=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool = False, - verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.account = account self.container = container self.store_page_images = store_page_images - self.verbose = verbose self.resourceGroup = resourceGroup self.subscriptionId = subscriptionId self.user_delegation_key: Optional[UserDelegationKey] = None
===========changed ref 15=========== # module: scripts.prepdocs + def setup_blob_manager( + azure_credential: AsyncTokenCredential, + storage_account: str, + storage_container: str, + storage_resource_group: str, + subscription_id: str, + search_images: bool, + storage_key: Union[str, None] = None, + ): + storage_creds: Union[AsyncTokenCredential, str] = azure_credential if storage_key is None else storage_key + return BlobManager( + endpoint=f"https://{storage_account}.blob.core.windows.net", + container=storage_container, + account=storage_account, + credential=storage_creds, + resourceGroup=storage_resource_group, + subscriptionId=subscription_id, + store_page_images=search_images, + ) +
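ImageEmbeddings now takes only an endpoint and an async token provider. The refactored setup_image_embeddings_service in this commit builds that provider with get_bearer_token_provider; a sketch of the same wiring, assuming azure-identity's async get_bearer_token_provider and DefaultAzureCredential, with a placeholder endpoint:

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from scripts.prepdocslib.embeddings import ImageEmbeddings

azure_credential = DefaultAzureCredential()
image_embeddings = ImageEmbeddings(
    endpoint="https://my-vision-resource.cognitiveservices.azure.com/",  # placeholder endpoint
    token_provider=get_bearer_token_provider(azure_credential, "https://cognitiveservices.azure.com/.default"),
)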
scripts.prepdocslib.embeddings/ImageEmbeddings.before_retry_sleep
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<del> if self.verbose: <1>:<add> logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") <del> print("Rate limited on the Vision embeddings API, sleeping before retrying...")
# module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): <0> if self.verbose: <1> print("Rate limited on the Vision embeddings API, sleeping before retrying...") <2>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose
===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, - verbose: bool = False, ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) - if self.verbose: + logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") - print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
===========changed ref 6=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 7=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 9=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 10=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 11=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 12=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 13=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 14=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
===========changed ref 15=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool = False, - verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.account = account self.container = container self.store_page_images = store_page_images - self.verbose = verbose self.resourceGroup = resourceGroup self.subscriptionId = subscriptionId self.user_delegation_key: Optional[UserDelegationKey] = None
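The before_retry_sleep hooks now log through the shared module logger instead of guarding a print with self.verbose. The retry loop they plug into (visible in create_embedding_batch above) uses tenacity; a self-contained sketch of that pattern, with a stand-in RateLimitError since the real one comes from the openai package:

import logging

from tenacity import AsyncRetrying, retry_if_exception_type, stop_after_attempt, wait_random_exponential

logger = logging.getLogger("ingester")

class RateLimitError(Exception):
    """Stand-in for openai.RateLimitError, to keep the sketch self-contained."""

def before_retry_sleep(retry_state):
    # Called by tenacity before each backoff sleep; it fires regardless of
    # any verbose flag now that verbosity lives in the logger level.
    logger.info("Rate limited, sleeping before retrying...")

async def embed_with_retries(embed_once):
    # Retry only on rate limiting, back off 15-60s, give up after 15 tries,
    # mirroring the AsyncRetrying arguments in create_embedding_batch.
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(RateLimitError),
        wait=wait_random_exponential(min=15, max=60),
        stop=stop_after_attempt(15),
        before_sleep=before_retry_sleep,
    ):
        with attempt:
            return await embed_once()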
scripts.prepdocslib.searchmanager/SearchManager.create_index
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<del> if self.search_info.verbose: <1>:<add> logger.info(f"Ensuring search index {self.search_info.index_name} exists") <del> print(f"Ensuring search index {self.search_info.index_name} exists")
# module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): <0> if self.search_info.verbose: <1> print(f"Ensuring search index {self.search_info.index_name} exists") <2> <3> async with self.search_info.create_search_index_client() as search_index_client: <4> fields = [ <5> ( <6> SimpleField(name="id", type="Edm.String", key=True) <7> if not self.use_int_vectorization <8> else SearchField( <9> name="id", <10> type="Edm.String", <11> key=True, <12> sortable=True, <13> filterable=True, <14> facetable=True, <15> analyzer_name="keyword", <16> ) <17> ), <18> SearchableField( <19> name="content", <20> type="Edm.String", <21> analyzer_name=self.search_analyzer_name, <22> ), <23> SearchField( <24> name="embedding", <25> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <26> hidden=False, <27> searchable=True, <28> filterable=False, <29> sortable=False, <30> facetable=False, <31> vector_search_dimensions=1536, <32> vector_search_profile_name="embedding_config", <33> ), <34> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <35> SimpleField( <36> name="sourcepage", <37> type="Edm.String", <38> filterable=True, <39> facetable=True, <40> ), <41> SimpleField( <42> name="sourcefile", <43> type="Edm.String", <44> filterable=True, <45> facetable=True, <46> ), <47> ] <48> if self.use_acls: <49> fields.append( <50> SimpleField( <51> </s>
===========below chunk 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 1 type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) if self.use_int_vectorization: fields.append(SearchableField(name="parent_id", type="Edm.String", filterable=True)) if self.search_images: fields.append( SearchField( name="imageEmbedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1024, vector_search_profile_name="embedding_config", ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_search=SemanticSearch( configurations=[ SemanticConfiguration( name="default", prioritized_fields=SemanticPrioritizedFields( title_field=None, content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswAlgorithmConfiguration( name="hnsw_config", parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm_configuration_name="hnsw_config", vectorizer=( f"{self.search_info.index_name}-vectorizer" if self.use_int_vectorization else None ), ), ], vectorizers=vectorizers, </s>
===========below chunk 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 2 <s>use_int_vectorization else None ), ), ], vectorizers=vectorizers, ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: if self.search_info.verbose: print(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: if self.search_info.verbose: print(f"Search index {self.search_info.index_name} already exists")
===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester")
===========changed ref 2=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 3=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 4=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 5=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 6=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose
===========changed ref 8=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...")
===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 12=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
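Methods such as create_index now emit progress through logging.getLogger("ingester") unconditionally, so verbosity becomes a one-line logger configuration at the entry point instead of a flag threaded through every constructor. A plausible sketch of that wiring; the exact flag handling in prepdocs is not shown in this excerpt, so the argparse setup here is an assumption:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")  # hypothetical flag for the sketch
args = parser.parse_args()

# One level switch on the shared "ingester" logger replaces the
# per-object verbose flags that this commit removes.
logging.basicConfig(format="%(message)s")
logging.getLogger("ingester").setLevel(logging.INFO if args.verbose else logging.WARNING)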
scripts.prepdocslib.searchmanager/SearchManager.remove_content
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<del> if self.search_info.verbose: <1>:<add> logger.info(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'") <del> print(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'") <8>:<add> documents_to_remove = [] <add> async for document in result: <add> # If only_oid is set, only remove documents that have only this oid <add> if not only_oid or document["oids"] == [only_oid]: <add> documents_to_remove.append({"id": document["id"]}) <add> removed_docs = await search_client.delete_documents(documents_to_remove) <del> removed_docs = await search_client.delete_documents( <9>:<del> documents=[{"id": document["id"]} async for document in result] <10>:<del> ) <11>:<del> if self.search_info.verbose: <12>:<add> logger.info(f"\tRemoved {len(removed_docs)} sections from index") <del> print(f"\tRemoved {len(removed_docs)} sections from index")
# module: scripts.prepdocslib.searchmanager class SearchManager: + def remove_content(self, path: Optional[str] = None, only_oid: Optional[str] = None): - def remove_content(self, path: Optional[str] = None): <0> if self.search_info.verbose: <1> print(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'") <2> async with self.search_info.create_search_client() as search_client: <3> while True: <4> filter = None if path is None else f"sourcefile eq '{os.path.basename(path)}'" <5> result = await search_client.search("", filter=filter, top=1000, include_total_count=True) <6> if await result.get_count() == 0: <7> break <8> removed_docs = await search_client.delete_documents( <9> documents=[{"id": document["id"]} async for document in result] <10> ) <11> if self.search_info.verbose: <12> print(f"\tRemoved {len(removed_docs)} sections from index") <13> # It can take a few seconds for search results to reflect changes, so wait a bit <14> await asyncio.sleep(2) <15>
===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 1=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester")
===========changed ref 2=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester")
===========changed ref 3=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 4=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 5=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 7=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose
===========changed ref 9=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...")
===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 13=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
===========changed ref 15=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, - verbose: bool = False, ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
===========changed ref 17=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool = False, - verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.account = account self.container = container self.store_page_images = store_page_images - self.verbose = verbose self.resourceGroup = resourceGroup self.subscriptionId = subscriptionId self.user_delegation_key: Optional[UserDelegationKey] = None
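The new only_oid parameter makes removal safe for the user-upload feature: a document is deleted only when the given oid is its sole owner, so documents shared with other users survive a single user's delete request. A small sketch of that predicate, matching the filter in remove_content; the sample documents are made up:

from typing import Optional

def should_remove(document: dict, only_oid: Optional[str] = None) -> bool:
    # Delete unconditionally when no oid filter is given; otherwise only
    # when the oid is the document's sole owner.
    return not only_oid or document["oids"] == [only_oid]

documents = [
    {"id": "1", "oids": ["user-a"]},            # owned only by user-a
    {"id": "2", "oids": ["user-a", "user-b"]},  # shared, must survive
]
assert [d["id"] for d in documents if should_remove(d, only_oid="user-a")] == ["1"]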
scripts.prepdocslib.filestrategy/FileStrategy.__init__
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<7>:<add> self.search_info = search_info
<s>file_strategy: ListFileStrategy, blob_manager: BlobManager, + search_info: SearchInfo, file_processors: dict[str, FileProcessor], document_action: DocumentAction = DocumentAction.Add, embeddings: Optional[OpenAIEmbeddings] = None, image_embeddings: Optional[ImageEmbeddings] = None, search_analyzer_name: Optional[str] = None, use_acls: bool = False, category: Optional[str] = None, ): <0> self.list_file_strategy = list_file_strategy <1> self.blob_manager = blob_manager <2> self.file_processors = file_processors <3> self.document_action = document_action <4> self.embeddings = embeddings <5> self.image_embeddings = image_embeddings <6> self.search_analyzer_name = search_analyzer_name <7> self.use_acls = use_acls <8> self.category = category <9>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.fileprocessor.FileProcessor parser: Parser splitter: TextSplitter at: scripts.prepdocslib.filestrategy logger = logging.getLogger("ingester") at: scripts.prepdocslib.listfilestrategy.File filename() file_extension() at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: scripts.prepdocslib.parser.Parser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.strategy Strategy() at: scripts.prepdocslib.textsplitter.TextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] at: typing List = _alias(list, 1, inst=False, name='List') at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: scripts.prepdocslib.filestrategy + def parse_file( + file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None + ) -> List[Section]: + key = file.file_extension() + processor = file_processors.get(key) + if processor is None: + logger.info(f"Skipping '{file.filename()}', no parser found.") + return [] + logger.info(f"Parsing '{file.filename()}'") + pages = [page async for page in processor.parser.parse(content=file.content)] + logger.info(f"Splitting '{file.filename()}' into sections") + sections = [ + Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) + ] + return sections + ===========changed ref 1=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 8=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose 
===========changed ref 10=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 14=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 16=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
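SearchInfo is now injected through the FileStrategy constructor instead of being handed to setup() and run(). A hedged construction sketch with placeholder endpoints, keys, and an empty processor map; none of these values come from the commit itself:

import asyncio

from azure.core.credentials import AzureKeyCredential
from scripts.prepdocslib.blobmanager import BlobManager
from scripts.prepdocslib.filestrategy import FileStrategy
from scripts.prepdocslib.listfilestrategy import LocalListFileStrategy
from scripts.prepdocslib.strategy import SearchInfo

async def main():
    search_info = SearchInfo(
        endpoint="https://mysearch.search.windows.net",  # placeholder
        credential=AzureKeyCredential("SEARCH_KEY_PLACEHOLDER"),
        index_name="gptkbindex",
    )
    strategy = FileStrategy(
        list_file_strategy=LocalListFileStrategy(path_pattern="data/*"),
        blob_manager=BlobManager(
            endpoint="https://mystorage.blob.core.windows.net",  # placeholder
            container="content",
            account="mystorage",
            credential="STORAGE_KEY_PLACEHOLDER",
            resourceGroup="my-rg",
            subscriptionId="00000000-0000-0000-0000-000000000000",
        ),
        search_info=search_info,  # injected here, no longer passed to setup()/run()
        file_processors={},       # extension -> FileProcessor map, left empty for the sketch
    )
    await strategy.setup()
    await strategy.run()

asyncio.run(main())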
scripts.prepdocslib.filestrategy/FileStrategy.setup
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<1>:<add> self.search_info, <del> search_info,
# module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): + def setup(self): - def setup(self, search_info: SearchInfo): <0> search_manager = SearchManager( <1> search_info, <2> self.search_analyzer_name, <3> self.use_acls, <4> False, <5> self.embeddings, <6> search_images=self.image_embeddings is not None, <7> ) <8> await search_manager.create_index() <9>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool=False) at: scripts.prepdocslib.embeddings OpenAIEmbeddings(open_ai_model_name: str, disable_batch: bool=False) ImageEmbeddings(endpoint: str, token_provider: Callable[[], Awaitable[str]]) at: scripts.prepdocslib.fileprocessor FileProcessor(parser: Parser, splitter: TextSplitter) at: scripts.prepdocslib.listfilestrategy ListFileStrategy() at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str) DocumentAction()
===========changed ref 0=========== <s>file_strategy: ListFileStrategy, blob_manager: BlobManager, + search_info: SearchInfo, file_processors: dict[str, FileProcessor], document_action: DocumentAction = DocumentAction.Add, embeddings: Optional[OpenAIEmbeddings] = None, image_embeddings: Optional[ImageEmbeddings] = None, search_analyzer_name: Optional[str] = None, use_acls: bool = False, category: Optional[str] = None, ): self.list_file_strategy = list_file_strategy self.blob_manager = blob_manager self.file_processors = file_processors self.document_action = document_action self.embeddings = embeddings self.image_embeddings = image_embeddings self.search_analyzer_name = search_analyzer_name + self.search_info = search_info self.use_acls = use_acls self.category = category
===========changed ref 1=========== # module: scripts.prepdocslib.filestrategy + def parse_file( + file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None + ) -> List[Section]: + key = file.file_extension() + processor = file_processors.get(key) + if processor is None: + logger.info(f"Skipping '{file.filename()}', no parser found.") + return [] + logger.info(f"Parsing '{file.filename()}'") + pages = [page async for page in processor.parser.parse(content=file.content)] + logger.info(f"Splitting '{file.filename()}' into sections") + sections = [ + Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) + ] + return sections +
===========changed ref 2=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose -
===========changed ref 3=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester")
===========changed ref 4=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester")
===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester")
===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester")
===========changed ref 7=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 -
===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose
===========changed ref 9=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose
===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose
===========changed ref 11=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose
===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...")
===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
===========changed ref 15=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None +
===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
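setup() passes its arguments to SearchManager positionally; read against the SearchManager signature quoted in the following record's unchanged refs, they map to (search_info, search_analyzer_name, use_acls, use_int_vectorization, embeddings). A keyword-form sketch of the same call for readability, wrapped in a coroutine since create_index is awaited:

async def ensure_index(search_info):
    # Keyword form of the positional call in FileStrategy.setup().
    search_manager = SearchManager(
        search_info,
        search_analyzer_name=None,
        use_acls=False,
        use_int_vectorization=False,  # the literal False in setup()
        embeddings=None,
        search_images=False,
    )
    await search_manager.create_index()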
scripts.prepdocslib.filestrategy/FileStrategy.run
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> search_manager = SearchManager( <add> self.search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings <del> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings) <1>:<add> ) <5>:<del> key = file.file_extension() <6>:<del> processor = self.file_processors.get(key) <7>:<del> if processor is None: <8>:<del> # skip file if no parser is found <9>:<del> if search_info.verbose: <10>:<del> print(f"Skipping '{file.filename()}'.") <11>:<del> continue <12>:<del> if search_info.verbose: <13>:<del> print(f"Parsing '{file.filename()}'") <14>:<del> pages = [page async for page in processor.parser.parse(content=file.content)] <15>:<del> if search_info.verbose: <16>:<del> print(f"Splitting '{file.filename()}' into sections") <17>:<add> sections = await parse_file(file, self.file_processors, self.category) <add> if sections: <del> sections = [ <18>:<del> Section(split_page, content=file, category=self.category) <19>:<del> for split_page in processor.splitter.split_pages(pages) <20>:<del> ] <21>:<del> <22>:<add> blob_sas_uris = await self.blob_manager.upload_blob(file) <del> blob_sas_uris = await self.blob_manager.upload_blob(file) <23>:<add> blob_image_embeddings: Optional[List[List[float]]] = None <del> blob_image_embeddings: Optional[List[List[float]]] = None <24>:<add> if self.image_embeddings and blob_sas_uris: <del> if self.image_embeddings and blob_sas_uris: <25>:<add> blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <del> blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <26>:<add> await search_manager.update_content(sections, blob_image_embeddings) <del> await search_manager.update_content(sections, blob_image_embeddings)
# module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): + def run(self): - def run(self, search_info: SearchInfo): <0> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings) <1> if self.document_action == DocumentAction.Add: <2> files = self.list_file_strategy.list() <3> async for file in files: <4> try: <5> key = file.file_extension() <6> processor = self.file_processors.get(key) <7> if processor is None: <8> # skip file if no parser is found <9> if search_info.verbose: <10> print(f"Skipping '{file.filename()}'.") <11> continue <12> if search_info.verbose: <13> print(f"Parsing '{file.filename()}'") <14> pages = [page async for page in processor.parser.parse(content=file.content)] <15> if search_info.verbose: <16> print(f"Splitting '{file.filename()}' into sections") <17> sections = [ <18> Section(split_page, content=file, category=self.category) <19> for split_page in processor.splitter.split_pages(pages) <20> ] <21> <22> blob_sas_uris = await self.blob_manager.upload_blob(file) <23> blob_image_embeddings: Optional[List[List[float]]] = None <24> if self.image_embeddings and blob_sas_uris: <25> blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <26> await search_manager.update_content(sections, blob_image_embeddings) <27> finally: <28> if file: <29> file.close() <30> elif self.document_action == DocumentAction.Remove: <31> paths = self.list_file_strategy.list_paths() <32> async for path in paths: <33> await self.blob_manager.remove_blob(path) <34> await search_</s>
===========below chunk 0=========== # module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): + def run(self): - def run(self, search_info: SearchInfo): # offset: 1 elif self.document_action == DocumentAction.RemoveAll: await self.blob_manager.remove_blob() await search_manager.remove_content() ===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager.BlobManager upload_blob(file: File) -> Optional[List[str]] at: scripts.prepdocslib.embeddings.ImageEmbeddings create_embeddings(blob_urls: List[str]) -> List[List[float]] at: scripts.prepdocslib.filestrategy parse_file(file: File, file_processors: dict[str, FileProcessor], category: Optional[str]=None) -> List[Section] at: scripts.prepdocslib.listfilestrategy.ListFileStrategy list() -> AsyncGenerator[File, None] at: scripts.prepdocslib.searchmanager SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False) at: scripts.prepdocslib.searchmanager.SearchManager create_index(vectorizers: Optional[List[VectorSearchVectorizer]]=None) update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None) at: scripts.prepdocslib.strategy DocumentAction() at: scripts.prepdocslib.strategy.Strategy setup(self) run(self) at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.filestrategy + def parse_file( + file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None + ) -> List[Section]: + key = file.file_extension() + processor = file_processors.get(key) + if processor is None: + logger.info(f"Skipping '{file.filename()}', no parser found.") + return [] + logger.info(f"Parsing '{file.filename()}'") + pages = [page async for page in processor.parser.parse(content=file.content)] + logger.info(f"Splitting '{file.filename()}' into sections") + sections = [ + Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) + ] + return sections + ===========changed ref 1=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: async with BlobServiceClient( account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() # Re-open and upload the original file with open(file.content.name, "rb") as reopened_file: blob_name = BlobManager.blob_name_from_file_name(file.content.name) + logger.info(f"\tUploading blob for whole file -> {blob_name}") - print(f"\tUploading blob for whole file -> {blob_name}") await container_client.upload_blob(blob_name, reopened_file, overwrite=True) if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": return await self.upload_pdf_blob_images(service_client, container_client, file) return None ===========changed ref 2=========== # module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): + def setup(self): - def setup(self, search_info: SearchInfo): search_manager = SearchManager( + self.search_info, - search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings, search_images=self.image_embeddings is not None, ) await search_manager.create_index() 
===========changed ref 3=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): - if self.search_info.verbose: + logger.info(f"Ensuring search index {self.search_info.index_name} exists") - print(f"Ensuring search index {self.search_info.index_name} exists") async with self.search_info.create_search_index_client() as search_index_client: fields = [ ( SimpleField(name="id", type="Edm.String", key=True) if not self.use_int_vectorization else SearchField( name="id", type="Edm.String", key=True, sortable=True, filterable=True, facetable=True, analyzer_name="keyword", ) ), SearchableField( name="content", type="Edm.String", analyzer_name=self.search_analyzer_name, ), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_profile_name="embedding_config", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField( name="sourcepage", type="Edm.String", filterable=True, facetable=True, ), SimpleField( name="sourcefile", type="Edm.String", filterable=True, facetable=True, ), ] if self.use_acls: fields.append( SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filter</s>
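The notable structural change in this record is that parsing and splitting move out of FileStrategy.run into a module-level parse_file helper (changed ref 0 above), so the batch ingestion path and the new UploadUserFileStrategy can share one code path. Below is a minimal sketch of the extension-dispatch pattern it relies on; FileProcessor here is a simplified stand-in for the repo's real parser/splitter pair, not its actual class.

import os
from dataclasses import dataclass
from typing import Callable, Dict, List

@dataclass
class FileProcessor:
    parse: Callable[[str], List[str]]        # raw content -> pages
    split: Callable[[List[str]], List[str]]  # pages -> sections

def parse_file(filename: str, content: str, file_processors: Dict[str, FileProcessor]) -> List[str]:
    key = os.path.splitext(filename)[1].lower()  # e.g. ".html"
    processor = file_processors.get(key)
    if processor is None:
        return []  # skip file if no parser is found
    return processor.split(processor.parse(content))

processors = {".txt": FileProcessor(parse=lambda c: [c], split=lambda pages: pages)}
print(parse_file("notes.txt", "hello", processors))  # ['hello']
print(parse_file("logo.png", "", processors))        # [] (skipped, no parser)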
scripts.prepdocslib.htmlparser/LocalHTMLParser.parse
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<7>:<del> if self.verbose: <8>:<add> logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") <del> print(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
# module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> """Parses the given content. <1> To learn more, please visit https://pypi.org/project/beautifulsoup4/ <2> Args: <3> content (IO): The content to parse. <4> Returns: <5> Page: The parsed html Page. <6> """ <7> if self.verbose: <8> print(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") <9> <10> data = content.read() <11> soup = BeautifulSoup(data, "html.parser") <12> <13> # Get text only from html file <14> result = soup.get_text() <15> <16> yield Page(0, 0, text=cleanup_data(result)) <17>
===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 8=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 11=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - 
print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 16=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 18=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) + ===========changed ref 19=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose ===========changed ref 20=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def __init__( + self, + search_info: SearchInfo, + file_processors: dict[str, FileProcessor], + embeddings: Optional[OpenAIEmbeddings] = None, + image_embeddings: Optional[ImageEmbeddings] = None, + ): + self.file_processors = file_processors + self.embeddings = embeddings + self.image_embeddings = image_embeddings + self.search_info = search_info + self.search_manager = SearchManager(self.search_info, None, True, False, self.embeddings) + ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, - verbose: bool = False, ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
tests.test_htmlparser/test_htmlparser_full
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<22>:<add> htmlparser = LocalHTMLParser() <del> htmlparser = LocalHTMLParser(verbose=True)
# module: tests.test_htmlparser @pytest.mark.asyncio async def test_htmlparser_full(): <0> file = io.StringIO( <1> """ <2> <html> <3> <head> <4> <title>Test title</title> <5> </head> <6> <body> <7> <!-- Test comment --> <8> <h1>Test header</h1> <9> <p> <10> Test paragraph one<br> <11> Test paragraph two<br><br> <12> Test paragraph three<br><br><br> <13> </p> <14> <p> <15> ---------- Test hyphens ---------- <16> </p> <17> </body> <18> </html> <19> """ <20> ) <21> file.name = "test.json" <22> htmlparser = LocalHTMLParser(verbose=True) <23> pages = [page async for page in htmlparser.parse(file)] <24> assert len(pages) == 1 <25> assert pages[0].page_num == 0 <26> assert pages[0].offset == 0 <27> assert ( <28> pages[0].text <29> == "Test title\nTest header\n Test paragraph one\n Test paragraph two\n Test paragraph three\n -- Test hyphens --" <30> ) <31>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: io StringIO(initial_value: Optional[str]=..., newline: Optional[str]=...) at: io.StringIO name: Any at: scripts.prepdocslib.htmlparser LocalHTMLParser(verbose: bool=False) at: scripts.prepdocslib.htmlparser.LocalHTMLParser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.page.Page.__init__ self.page_num = page_num self.offset = offset self.text = text ===========changed ref 0=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ - if self.verbose: + logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") - print(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 1=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 2=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 9=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 13=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - 
self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 18=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 20=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) +
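For context on the test above: the parser under test delegates extraction to BeautifulSoup's get_text, which pulls the document's text nodes and leaves whitespace normalization to the module's cleanup_data step. A tiny sketch of that call, assuming beautifulsoup4 is installed:

from bs4 import BeautifulSoup

html = "<html><head><title>Test title</title></head><body><p>Hello<br>world</p></body></html>"
soup = BeautifulSoup(html, "html.parser")
print(soup.get_text("\n"))  # "\n" joins text nodes; plain get_text() concatenates them directly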
tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> with caplog.at_level(logging.INFO): <add> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <del> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1>:<add> with pytest.raises(tenacity.RetryError): <del> with pytest.raises(tenacity.RetryError): <2>:<add> embeddings = AzureOpenAIEmbeddingService( <del> embeddings = AzureOpenAIEmbeddingService( <3>:<add> open_ai_service="x", <del> open_ai_service="x", <4>:<add> open_ai_deployment="x", <del> open_ai_deployment="x", <5>:<add> open_ai_model_name="text-embedding-ada-002", <del> open_ai_model_name="text-embedding-ada-002", <6>:<add> credential=MockAzureCredential(), <del> credential=MockAzureCredential(), <7>:<add> disable_batch=False, <del> disable_batch=False, <8>:<del> verbose=True, <9>:<add> ) <del> ) <10>:<add> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <del> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11>:<add> await embeddings.create_embeddings(texts=["foo"]) <del> await embeddings.create_embeddings(texts=["foo"]) <12>:<del> captured = capsys.readouterr() <13>:<add> assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 <del> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14
# module: tests.test_prepdocs @pytest.mark.asyncio + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): <0> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1> with pytest.raises(tenacity.RetryError): <2> embeddings = AzureOpenAIEmbeddingService( <3> open_ai_service="x", <4> open_ai_deployment="x", <5> open_ai_model_name="text-embedding-ada-002", <6> credential=MockAzureCredential(), <7> disable_batch=False, <8> verbose=True, <9> ) <10> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11> await embeddings.create_embeddings(texts=["foo"]) <12> captured = capsys.readouterr() <13> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 <14>
===========unchanged ref 0=========== at: _pytest.logging caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None] at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: logging INFO = 20 at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) at: scripts.prepdocslib.embeddings.OpenAIEmbeddings SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} create_embeddings(texts: List[str]) -> List[List[float]] at: tenacity RetryError(last_attempt: "Future") at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) at: tests.test_prepdocs create_rate_limit_client(*args, **kwargs) ===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 9=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a 
ADLS2 storage account + """ + ===========changed ref 12=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 17=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 19=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) +
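Because the rate-limit message now goes through the logging module rather than print, the test above swaps pytest's capsys fixture for caplog and scopes capture with caplog.at_level. A minimal illustrative test of that pattern (not the repo's test):

import logging

def do_work() -> None:
    logging.getLogger("ingester").info("Rate limited on the OpenAI embeddings API")

def test_captures_log_lines(caplog):  # caplog is a built-in pytest fixture
    with caplog.at_level(logging.INFO):
        do_work()
    assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 1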
tests.test_prepdocs/test_compute_embedding_ratelimiterror_single
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> with caplog.at_level(logging.INFO): <add> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <del> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1>:<add> with pytest.raises(tenacity.RetryError): <del> with pytest.raises(tenacity.RetryError): <2>:<add> embeddings = AzureOpenAIEmbeddingService( <del> embeddings = AzureOpenAIEmbeddingService( <3>:<add> open_ai_service="x", <del> open_ai_service="x", <4>:<add> open_ai_deployment="x", <del> open_ai_deployment="x", <5>:<add> open_ai_model_name="text-embedding-ada-002", <del> open_ai_model_name="text-embedding-ada-002", <6>:<add> credential=MockAzureCredential(), <del> credential=MockAzureCredential(), <7>:<add> disable_batch=True, <del> disable_batch=True, <8>:<del> verbose=True, <9>:<add> ) <del> ) <10>:<add> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <del> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11>:<add> await embeddings.create_embeddings(texts=["foo"]) <del> await embeddings.create_embeddings(texts=["foo"]) <12>:<del> captured = capsys.readouterr() <13>:<add> assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 <del> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14
# module: tests.test_prepdocs @pytest.mark.asyncio + async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog): - async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys): <0> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1> with pytest.raises(tenacity.RetryError): <2> embeddings = AzureOpenAIEmbeddingService( <3> open_ai_service="x", <4> open_ai_deployment="x", <5> open_ai_model_name="text-embedding-ada-002", <6> credential=MockAzureCredential(), <7> disable_batch=True, <8> verbose=True, <9> ) <10> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11> await embeddings.create_embeddings(texts=["foo"]) <12> captured = capsys.readouterr() <13> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 <14>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: logging INFO = 20 at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) at: scripts.prepdocslib.embeddings.OpenAIEmbeddings create_embeddings(texts: List[str]) -> List[List[float]] at: tenacity RetryError(last_attempt: "Future") at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) at: tests.test_prepdocs create_rate_limit_client(*args, **kwargs) ===========changed ref 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): + with caplog.at_level(logging.INFO): + monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) - monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) + with pytest.raises(tenacity.RetryError): - with pytest.raises(tenacity.RetryError): + embeddings = AzureOpenAIEmbeddingService( - embeddings = AzureOpenAIEmbeddingService( + open_ai_service="x", - open_ai_service="x", + open_ai_deployment="x", - open_ai_deployment="x", + open_ai_model_name="text-embedding-ada-002", - open_ai_model_name="text-embedding-ada-002", + credential=MockAzureCredential(), - credential=MockAzureCredential(), + disable_batch=False, - disable_batch=False, - verbose=True, + ) - ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) - monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) + await embeddings.create_embeddings(texts=["foo"]) - await embeddings.create_embeddings(texts=["foo"]) - captured = capsys.readouterr() + assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 - assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 1=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 2=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 9=========== # 
module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 13=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
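Both rate-limit tests also monkeypatch tenacity.wait_random_exponential.__call__ to return 0 so retries run instantly, then assert that tenacity.RetryError surfaces once the policy gives up. A hedged sketch of such a retry policy; the attempt count and error type here are illustrative, not the repo's exact configuration:

import tenacity

class FakeRateLimitError(Exception):  # stand-in for openai.RateLimitError
    pass

@tenacity.retry(
    retry=tenacity.retry_if_exception_type(FakeRateLimitError),
    wait=tenacity.wait_random_exponential(multiplier=0.01, max=0.05),
    stop=tenacity.stop_after_attempt(3),
)
def call_api() -> None:
    raise FakeRateLimitError("429 Too Many Requests")

try:
    call_api()
except tenacity.RetryError:
    print("retries exhausted")  # what the tests assert via pytest.raises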
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<8>:<del> verbose=True, <20>:<del> verbose=True,
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_autherror(monkeypatch, capsys): <0> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1> with pytest.raises(openai.AuthenticationError): <2> embeddings = AzureOpenAIEmbeddingService( <3> open_ai_service="x", <4> open_ai_deployment="x", <5> open_ai_model_name="text-embedding-ada-002", <6> credential=MockAzureCredential(), <7> disable_batch=False, <8> verbose=True, <9> ) <10> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client) <11> await embeddings.create_embeddings(texts=["foo"]) <12> <13> with pytest.raises(openai.AuthenticationError): <14> embeddings = AzureOpenAIEmbeddingService( <15> open_ai_service="x", <16> open_ai_deployment="x", <17> open_ai_model_name="text-embedding-ada-002", <18> credential=MockAzureCredential(), <19> disable_batch=True, <20> verbose=True, <21> ) <22> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client) <23> await embeddings.create_embeddings(texts=["foo"]) <24>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) at: scripts.prepdocslib.embeddings.OpenAIEmbeddings create_embeddings(texts: List[str]) -> List[List[float]] at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) at: tests.test_prepdocs create_auth_error_limit_client(*args, **kwargs) ===========changed ref 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio + async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog): - async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys): + with caplog.at_level(logging.INFO): + monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) - monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) + with pytest.raises(tenacity.RetryError): - with pytest.raises(tenacity.RetryError): + embeddings = AzureOpenAIEmbeddingService( - embeddings = AzureOpenAIEmbeddingService( + open_ai_service="x", - open_ai_service="x", + open_ai_deployment="x", - open_ai_deployment="x", + open_ai_model_name="text-embedding-ada-002", - open_ai_model_name="text-embedding-ada-002", + credential=MockAzureCredential(), - credential=MockAzureCredential(), + disable_batch=True, - disable_batch=True, - verbose=True, + ) - ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) - monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) + await embeddings.create_embeddings(texts=["foo"]) - await embeddings.create_embeddings(texts=["foo"]) - captured = capsys.readouterr() + assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 - assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 1=========== # module: tests.test_prepdocs @pytest.mark.asyncio + async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): - async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): + with caplog.at_level(logging.INFO): + monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) - monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) + with pytest.raises(tenacity.RetryError): - with pytest.raises(tenacity.RetryError): + embeddings = AzureOpenAIEmbeddingService( - embeddings = AzureOpenAIEmbeddingService( + open_ai_service="x", - open_ai_service="x", + open_ai_deployment="x", - open_ai_deployment="x", + open_ai_model_name="text-embedding-ada-002", - open_ai_model_name="text-embedding-ada-002", + credential=MockAzureCredential(), - credential=MockAzureCredential(), + disable_batch=False, - disable_batch=False, - verbose=True, + ) - ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) - monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) + await 
embeddings.create_embeddings(texts=["foo"]) - await embeddings.create_embeddings(texts=["foo"]) - captured = capsys.readouterr() + assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 - assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 2=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 10=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ +
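The auth-error test expects openai.AuthenticationError itself rather than tenacity.RetryError, because a tenacity retry decorator only retries the exception types it is told to; anything else propagates from the first attempt. A small sketch of that behavior with stand-in exception types:

import tenacity

class Retryable(Exception):
    pass

class Fatal(Exception):  # stand-in for openai.AuthenticationError
    pass

@tenacity.retry(retry=tenacity.retry_if_exception_type(Retryable), stop=tenacity.stop_after_attempt(3))
def call_api() -> None:
    raise Fatal("401 Unauthorized")

try:
    call_api()
except Fatal:
    print("raised on the first attempt; no retries happened")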
scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.__init__
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService): <del> if not embeddings: <1>:<add> raise Exception("Expecting AzureOpenAI embedding service") <del> raise Exception("Expecting AzureOpenAI embedding Service") <12>:<add> self.search_info = search_info
<s> self, list_file_strategy: ListFileStrategy, blob_manager: BlobManager, + search_info: SearchInfo, embeddings: Optional[AzureOpenAIEmbeddingService], subscription_id: str, search_service_user_assigned_id: str, document_action: DocumentAction = DocumentAction.Add, search_analyzer_name: Optional[str] = None, use_acls: bool = False, category: Optional[str] = None, ): <0> if not embeddings: <1> raise Exception("Expecting AzureOpenAI embedding Service") <2> <3> self.list_file_strategy = list_file_strategy <4> self.blob_manager = blob_manager <5> self.document_action = document_action <6> self.embeddings = embeddings <7> self.subscription_id = subscription_id <8> self.search_user_assigned_identity = search_service_user_assigned_id <9> self.search_analyzer_name = search_analyzer_name <10> self.use_acls = use_acls <11> self.category = category <12>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, account: str, credential: Union[AsyncTokenCredential, str], resourceGroup: str, subscriptionId: str, store_page_images: bool=False) at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) at: scripts.prepdocslib.listfilestrategy ListFileStrategy() at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str) DocumentAction() ===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 9=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 12=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: 
bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 17=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 19=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) + ===========changed ref 20=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
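The pattern repeated throughout this refactor is visible in the hunk above: SearchInfo moves from a setup()/run() parameter into each strategy's constructor, so the lifecycle methods take no arguments. A simplified, hypothetical sketch of that shape (names illustrative, not the repo's classes):

class SearchInfo:
    def __init__(self, endpoint: str, index_name: str):
        self.endpoint = endpoint
        self.index_name = index_name

class Strategy:
    def __init__(self, search_info: SearchInfo):
        self.search_info = search_info  # injected once, not passed per call

    def run(self) -> None:  # was: run(self, search_info)
        print(f"Ingesting into index '{self.search_info.index_name}'")

Strategy(SearchInfo("https://example.search.windows.net", "docs")).run()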
scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.setup
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<1>:<add> search_info=self.search_info, <del> search_info=search_info, <15>:<add> name=f"{self.search_info.index_name}-vectorizer", <del> name=f"{search_info.index_name}-vectorizer", <26>:<add> ds_client = self.search_info.create_search_indexer_client() <del> ds_client = search_info.create_search_indexer_client() <29>:<add> name=f"{self.search_info.index_name}-blob", <del> name=f"{search_info.index_name}-blob", <37>:<add> logger.info("Search indexer data source connection updated.") <del> print("Search indexer data source connection updated.") <39>:<add> embedding_skillset = await self.create_embedding_skill(self.search_info.index_name) <del> embedding_skillset = await self.create_embedding_skill(search_info.index_
# module: scripts.prepdocslib.integratedvectorizerstrategy class IntegratedVectorizerStrategy(Strategy): + def setup(self): - def setup(self, search_info: SearchInfo): <0> search_manager = SearchManager( <1> search_info=search_info, <2> search_analyzer_name=self.search_analyzer_name, <3> use_acls=self.use_acls, <4> use_int_vectorization=True, <5> embeddings=self.embeddings, <6> search_images=False, <7> ) <8> <9> if self.embeddings is None: <10> raise ValueError("Expecting Azure Open AI instance") <11> <12> await search_manager.create_index( <13> vectorizers=[ <14> AzureOpenAIVectorizer( <15> name=f"{search_info.index_name}-vectorizer", <16> kind="azureOpenAI", <17> azure_open_ai_parameters=AzureOpenAIParameters( <18> resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com", <19> deployment_id=self.embeddings.open_ai_deployment, <20> ), <21> ), <22> ] <23> ) <24> <25> # create indexer client <26> ds_client = search_info.create_search_indexer_client() <27> ds_container = SearchIndexerDataContainer(name=self.blob_manager.container) <28> data_source_connection = SearchIndexerDataSourceConnection( <29> name=f"{search_info.index_name}-blob", <30> type="azureblob", <31> connection_string=self.blob_manager.get_managedidentity_connectionstring(), <32> container=ds_container, <33> data_deletion_detection_policy=NativeBlobSoftDeleteDeletionDetectionPolicy(), <34> ) <35> <36> await ds_client.create_or_update_data_source_connection(data_source_connection) <37> print("Search indexer data source connection updated.") <38> <39> embedding_skillset = await self.create_embedding_skill(search_info.index_</s>
===========below chunk 0=========== # module: scripts.prepdocslib.integratedvectorizerstrategy class IntegratedVectorizerStrategy(Strategy): + def setup(self): - def setup(self, search_info: SearchInfo): # offset: 1 await ds_client.create_or_update_skillset(embedding_skillset) await ds_client.close() ===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager.BlobManager get_managedidentity_connectionstring() at: scripts.prepdocslib.blobmanager.BlobManager.__init__ self.container = container at: scripts.prepdocslib.embeddings.AzureOpenAIEmbeddingService.__init__ self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.__init__ self.blob_manager = blob_manager self.embeddings = embeddings self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.search_info = search_info at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.create_embedding_skill index_projections = SearchIndexerIndexProjections( selectors=[ SearchIndexerIndexProjectionSelector( target_index_name=index_name, parent_key_field_name="parent_id", source_context="/document/pages/*", mappings=[ InputFieldMappingEntry(name="content", source="/document/pages/*"), InputFieldMappingEntry(name="embedding", source="/document/pages/*/vector"), InputFieldMappingEntry(name="sourcepage", source="/document/metadata_storage_name"), ], ), ], parameters=SearchIndexerIndexProjectionsParameters( projection_mode=IndexProjectionMode.SKIP_INDEXING_PARENT_DOCUMENTS ), ) skillset = SearchIndexerSkillset( name=skillset_name, description="Skillset to chunk documents and generate embeddings", skills=[split_skill, embedding_skill], index_projections=index_projections, ) ===========unchanged ref 1=========== at: scripts.prepdocslib.searchmanager SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False) at: scripts.prepdocslib.searchmanager.SearchManager create_index(vectorizers: Optional[List[VectorSearchVectorizer]]=None) at: scripts.prepdocslib.strategy.SearchInfo create_search_indexer_client() -> SearchIndexerClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name at: scripts.prepdocslib.strategy.Strategy setup(self) ===========changed ref 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): - if self.search_info.verbose: + logger.info(f"Ensuring search index {self.search_info.index_name} exists") - print(f"Ensuring search index {self.search_info.index_name} exists") async with self.search_info.create_search_index_client() as search_index_client: fields = [ ( SimpleField(name="id", type="Edm.String", key=True) if not self.use_int_vectorization else SearchField( name="id", type="Edm.String", key=True, sortable=True, filterable=True, facetable=True, analyzer_name="keyword", ) ), SearchableField( name="content", type="Edm.String", analyzer_name=self.search_analyzer_name, ), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_profile_name="embedding_config", ), SimpleField(name="category", type="Edm.String", filterable=True, 
facetable=True), SimpleField( name="sourcepage", type="Edm.String", filterable=True, facetable=True, ), SimpleField( name="sourcefile", type="Edm.String", filterable=True, facetable=True, ), ] if self.use_acls: fields.append( SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filter</s> ===========changed ref 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 1 <s> SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) if self.use_int_vectorization: fields.append(SearchableField(name="parent_id", type="Edm.String", filterable=True)) if self.search_images: fields.append( SearchField( name="imageEmbedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1024, vector_search_profile_name="embedding_config", ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_search=SemanticSearch( configurations=[ SemanticConfiguration( name="default", prioritized_fields=SemanticPrioritizedFields( title_field=None, content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswAlgorithmConfiguration( name="hnsw_config", parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm_configuration_name="hnsw_config", vectorizer=( f"{self</s> ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 2 <s>_info.index_name}-vectorizer" if self.use_int_vectorization else None ), ), ], vectorizers=vectorizers, ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: - if self.search_info.verbose: + logger.info(f"Creating {self.search_info.index_name} search index") - print(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: - if self.search_info.verbose: + logger.info(f"Search index {self.search_info.index_name} already exists") - print(f"Search index {self.search_info.index_name} already exists")
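The net effect of this change is that SearchInfo becomes a constructor dependency of the strategy instead of an argument threaded through setup() and run(). A minimal sketch of the resulting call shape; the endpoint, key, and index name are hypothetical placeholders (the AzureKeyCredential style follows the test fixture later in this log), and the strategy construction is shown only in comments since its collaborators are assumed to exist:

from azure.core.credentials import AzureKeyCredential

from scripts.prepdocslib.strategy import SearchInfo

# SearchInfo no longer takes a verbose flag; logging replaces it (see below).
search_info = SearchInfo(
    endpoint="https://myservice.search.windows.net",  # placeholder
    credential=AzureKeyCredential("search-api-key"),  # placeholder
    index_name="gptkbindex",                          # placeholder
)

# Before #1407:
#   strategy = IntegratedVectorizerStrategy(..., embeddings=embeddings)
#   await strategy.setup(search_info)
#   await strategy.run(search_info)
# After #1407: search_info is injected once, at construction time:
#   strategy = IntegratedVectorizerStrategy(..., search_info=search_info, embeddings=embeddings)
#   await strategy.setup()
#   await strategy.run()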
scripts.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.run
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<16>:<add> indexer_name = f"{self.search_info.index_name}-indexer" <del> indexer_name = f"{search_info.index_name}-indexer" <21>:<add> skillset_name=f"{self.search_info.index_name}-skillset", <del> skillset_name=f"{search_info.index_name}-skillset", <22>:<add> target_index_name=self.search_info.index_name, <del> target_index_name=search_info.index_name, <23>:<add> data_source_name=f"{self.search_info.index_name}-blob", <del> data_source_name=f"{search_info.index_name}-blob", <28>:<add> indexer_client = self.search_info.create_search_indexer_client() <del> indexer_client = search_info.create_search_indexer_client() <35>:<add> logger.info( <del> print(
# module: scripts.prepdocslib.integratedvectorizerstrategy class IntegratedVectorizerStrategy(Strategy): + def run(self): - def run(self, search_info: SearchInfo): <0> if self.document_action == DocumentAction.Add: <1> files = self.list_file_strategy.list() <2> async for file in files: <3> try: <4> await self.blob_manager.upload_blob(file) <5> finally: <6> if file: <7> file.close() <8> elif self.document_action == DocumentAction.Remove: <9> paths = self.list_file_strategy.list_paths() <10> async for path in paths: <11> await self.blob_manager.remove_blob(path) <12> elif self.document_action == DocumentAction.RemoveAll: <13> await self.blob_manager.remove_blob() <14> <15> # Create an indexer <16> indexer_name = f"{search_info.index_name}-indexer" <17> <18> indexer = SearchIndexer( <19> name=indexer_name, <20> description="Indexer to index documents and generate embeddings", <21> skillset_name=f"{search_info.index_name}-skillset", <22> target_index_name=search_info.index_name, <23> data_source_name=f"{search_info.index_name}-blob", <24> # Map the metadata_storage_name field to the title field in the index to display the PDF title in the search results <25> field_mappings=[FieldMapping(source_field_name="metadata_storage_name", target_field_name="title")], <26> ) <27> <28> indexer_client = search_info.create_search_indexer_client() <29> indexer_result = await indexer_client.create_or_update_indexer(indexer) <30> <31> # Run the indexer <32> await indexer_client.run_indexer(indexer_name) <33> await indexer_client.close() <34> <35> print( <36> f"Successfully created index, indexer: {indexer_result.name}, and skillset. Please navigate to search service in Azure Portal to</s>
===========below chunk 0=========== # module: scripts.prepdocslib.integratedvectorizerstrategy class IntegratedVectorizerStrategy(Strategy): + def run(self): - def run(self, search_info: SearchInfo): # offset: 1 ) ===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager.BlobManager upload_blob(file: File) -> Optional[List[str]] remove_blob(path: Optional[str]=None) at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy create_embedding_skill(index_name: str) at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.__init__ self.list_file_strategy = list_file_strategy self.blob_manager = blob_manager self.document_action = document_action self.search_info = search_info at: scripts.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.setup ds_client = self.search_info.create_search_indexer_client() at: scripts.prepdocslib.listfilestrategy.ListFileStrategy list() -> AsyncGenerator[File, None] list_paths() -> AsyncGenerator[str, None] at: scripts.prepdocslib.strategy DocumentAction() at: scripts.prepdocslib.strategy.SearchInfo create_search_indexer_client() -> SearchIndexerClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name at: scripts.prepdocslib.strategy.Strategy run(self) ===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: async with BlobServiceClient( account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() # Re-open and upload the original file with open(file.content.name, "rb") as reopened_file: blob_name = BlobManager.blob_name_from_file_name(file.content.name) + logger.info(f"\tUploading blob for whole file -> {blob_name}") - print(f"\tUploading blob for whole file -> {blob_name}") await container_client.upload_blob(blob_name, reopened_file, overwrite=True) if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": return await self.upload_pdf_blob_images(service_client, container_client, file) return None ===========changed ref 1=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue - if self.verbose: + logger.info(f"\tRemoving blob {blob_path}") - print(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path) ===========changed ref 2=========== <s> self, list_file_strategy: ListFileStrategy, blob_manager: BlobManager, + search_info: SearchInfo, embeddings: 
Optional[AzureOpenAIEmbeddingService], subscription_id: str, search_service_user_assigned_id: str, document_action: DocumentAction = DocumentAction.Add, search_analyzer_name: Optional[str] = None, use_acls: bool = False, category: Optional[str] = None, ): + if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService): - if not embeddings: + raise Exception("Expecting AzureOpenAI embedding service") - raise Exception("Expecting AzureOpenAI embedding Service") self.list_file_strategy = list_file_strategy self.blob_manager = blob_manager self.document_action = document_action self.embeddings = embeddings self.subscription_id = subscription_id self.search_user_assigned_identity = search_service_user_assigned_id self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.category = category + self.search_info = search_info ===========changed ref 3=========== # module: scripts.prepdocslib.integratedvectorizerstrategy class IntegratedVectorizerStrategy(Strategy): + def setup(self): - def setup(self, search_info: SearchInfo): search_manager = SearchManager( + search_info=self.search_info, - search_info=search_info, search_analyzer_name=self.search_analyzer_name, use_acls=self.use_acls, use_int_vectorization=True, embeddings=self.embeddings, search_images=False, ) if self.embeddings is None: raise ValueError("Expecting Azure Open AI instance") await search_manager.create_index( vectorizers=[ AzureOpenAIVectorizer( + name=f"{self.search_info.index_name}-vectorizer", - name=f"{search_info.index_name}-vectorizer", kind="azureOpenAI", azure_open_ai_parameters=AzureOpenAIParameters( resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com", deployment_id=self.embeddings.open_ai_deployment, ), ), ] ) # create indexer client + ds_client = self.search_info.create_search_indexer_client() - ds_client = search_info.create_search_indexer_client() ds_container = SearchIndexerDataContainer(name=self.blob_manager.container) data_source_connection = SearchIndexerDataSourceConnection( + name=f"{self.search_info.index_name}-blob", - name=f"{search_info.index_name}-blob", type="azureblob", connection_string=self.blob_manager.get_managedidentity_connectionstring(), container=ds_container, data_deletion_detection_policy=NativeBlobSoftDeleteDeletionDetectionPolicy(), ) await ds_client.create_or_update_data_source_connection(data_source_connection)</s>
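With the per-class verbose flags gone, every prepdocslib module above emits through the shared "ingester" logger, so verbosity becomes a standard logging concern for the caller. A minimal sketch of how a caller could restore the old verbose=True behavior; the handler and format choices are assumptions, not from the commit:

import logging

# Opt in to the INFO-level detail that the removed verbose flags used to gate,
# while keeping other libraries at WARNING.
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logging.getLogger("ingester").setLevel(logging.INFO)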
scripts.prepdocslib.pdfparser/LocalPdfParser.parse
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<del> if self.verbose: <1>:<add> logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") <del> print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)")
# module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> if self.verbose: <1> print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") <2> <3> reader = PdfReader(content) <4> pages = reader.pages <5> offset = 0 <6> for page_num, p in enumerate(pages): <7> page_text = p.extract_text() <8> yield Page(page_num=page_num, offset=offset, text=page_text) <9> offset += len(page_text) <10>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.parser.Parser parse(self, content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.pdfparser logger = logging.getLogger("ingester") at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) IO() at: typing.IO __slots__ = () ===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 9=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 10=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 13=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = 
False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 18=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 20=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) + ===========changed ref 21=========== # module: scripts.prepdocslib.listfilestrategy class ADLSGen2ListFileStrategy(ListFileStrategy): def __init__( self, data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], - verbose: bool = False, ): self.data_lake_storage_account = data_lake_storage_account self.data_lake_filesystem = data_lake_filesystem self.data_lake_path = data_lake_path self.credential = credential - self.verbose = verbose
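Since parse() is annotated as an AsyncGenerator of Page objects, callers iterate it with async for (as the list-parse test later in this log does), and each Page carries a cumulative character offset into the whole document. A small usage sketch; the file path is a placeholder and the import path assumes the repo layout shown in the module headers:

import asyncio

from scripts.prepdocslib.pdfparser import LocalPdfParser

async def main() -> None:
    parser = LocalPdfParser()  # no verbose flag after this refactor
    with open("example.pdf", "rb") as f:  # placeholder path
        async for page in parser.parse(content=f):
            print(page.page_num, page.offset, len(page.text))

asyncio.run(main())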
scripts.prepdocslib.pdfparser/DocumentAnalysisParser.__init__
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<3>:<del> self.verbose = verbose
# module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): <0> self.model_id = model_id <1> self.endpoint = endpoint <2> self.credential = credential <3> self.verbose = verbose <4>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.parser.Parser __init__(self, verbose: bool=False) parse(self, content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.pdfparser logger = logging.getLogger("ingester") at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) IO() ===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: - if self.verbose: + logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") - print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 2=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 10=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 14=========== # module: 
scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 19=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 20=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 21=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) +
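Construction of the Document Intelligence parser is correspondingly simpler: only the endpoint, credential, and an optional model_id remain. A sketch under assumed values; the endpoint is a placeholder, and the async credential type is chosen to match the AsyncTokenCredential annotation above:

from azure.identity.aio import DefaultAzureCredential

from scripts.prepdocslib.pdfparser import DocumentAnalysisParser

parser = DocumentAnalysisParser(
    endpoint="https://myresource.cognitiveservices.azure.com/",  # placeholder
    credential=DefaultAzureCredential(),
    # model_id defaults to "prebuilt-layout"
)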
scripts.prepdocslib.pdfparser/DocumentAnalysisParser.parse
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<del> if self.verbose: <1>:<add> logger.info(f"Extracting text from '{content.name}' using Azure Document Intelligence") <del> print(f"Extracting text from '{content.name}' using Azure Document Intelligence")
# module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> if self.verbose: <1> print(f"Extracting text from '{content.name}' using Azure Document Intelligence") <2> <3> async with DocumentIntelligenceClient( <4> endpoint=self.endpoint, credential=self.credential <5> ) as document_intelligence_client: <6> poller = await document_intelligence_client.begin_analyze_document( <7> model_id=self.model_id, analyze_request=content, content_type="application/octet-stream" <8> ) <9> form_recognizer_results = await poller.result() <10> <11> offset = 0 <12> for page_num, page in enumerate(form_recognizer_results.pages): <13> tables_on_page = [ <14> table <15> for table in (form_recognizer_results.tables or []) <16> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 <17> ] <18> <19> # mark all positions of the table spans in the page <20> page_offset = page.spans[0].offset <21> page_length = page.spans[0].length <22> table_chars = [-1] * page_length <23> for table_id, table in enumerate(tables_on_page): <24> for span in table.spans: <25> # replace all table spans with "table_id" in table_chars array <26> for i in range(span.length): <27> idx = span.offset - page_offset + i <28> if idx >= 0 and idx < page_length: <29> table_chars[idx] = table_id <30> <31> # build page text by replacing characters in table spans with table html <32> page_text = "" <33> added_tables = set() <34> for idx, table_id in enumerate(table_chars): <35> if table_id == -1: <36> page_text += form_recognizer_</s>
===========below chunk 0=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: # offset: 1 elif table_id not in added_tables: page_text += DocumentAnalysisParser.table_to_html(tables_on_page[table_id]) added_tables.add(table_id) yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.pdfparser DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout") at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser.__init__ self.model_id = model_id self.endpoint = endpoint self.credential = credential ===========changed ref 0=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): self.model_id = model_id self.endpoint = endpoint self.credential = credential - self.verbose = verbose ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: - if self.verbose: + logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") - print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 2=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 3=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 11=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, 
max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 15=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 17=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 20=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization
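The parse() body above interleaves page text with HTML renderings of any tables Document Intelligence found, using a per-character table_chars array. A self-contained toy illustration of that span-replacement idea, separate from the repo's code: the text and span values are made up, and table_to_html() is stood in by a literal:

page_text = "Intro text TABLECELLS trailing text"
spans = {0: (11, 10)}  # table_id -> (offset, length), hypothetical values

# Tag every character position covered by a table span with its table id.
table_chars = [-1] * len(page_text)
for table_id, (offset, length) in spans.items():
    for i in range(length):
        table_chars[offset + i] = table_id

# Rebuild the text, emitting each table exactly once in place of its span.
rebuilt, added = "", set()
for idx, table_id in enumerate(table_chars):
    if table_id == -1:
        rebuilt += page_text[idx]
    elif table_id not in added:
        rebuilt += "<table id=0></table>"  # stand-in for table_to_html()
        added.add(table_id)

print(rebuilt)  # Intro text <table id=0></table> trailing text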
tests.test_searchmanager/search_info
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<4>:<del> verbose=True,
# module: tests.test_searchmanager @pytest.fixture def search_info(): <0> return SearchInfo( <1> endpoint="https://testsearchclient.blob.core.windows.net", <2> credential=AzureKeyCredential("test"), <3> index_name="test", <4> verbose=True, <5> ) <6>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str) ===========changed ref 0=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 10=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 11=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 13=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 14=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - 
verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): self.model_id = model_id self.endpoint = endpoint self.credential = credential - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 17=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 20=========== # module: scripts.prepdocs + def clean_key_if_exists(key: Union[str, None]) -> Union[str, None]: + """Remove leading and trailing whitespace from a key if it exists. If the key is empty, return None.""" + if key is not None and key.strip() != "": + return key.strip() + return None + ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( - self, - open_ai_model_name: str, - credential: str, - organization: Optional[str] = None, - disable_batch: bool = False, - verbose: bool = False, + self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, disable_batch) - super().__init__(open_ai_model_name, disable_batch, verbose) self.credential = credential self.organization = organization ===========changed ref 22=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def add_file(self, file: File): + if self.image_embeddings: + logging.warning("Image embeddings are not currently supported for the user upload feature") + sections = await parse_file(file, self.file_processors) + if sections: + await self.search_manager.update_content(sections) +
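The removed is_key_empty() predicate is replaced by clean_key_if_exists(), which both normalizes and filters keys. Its contract, taken directly from the docstring shown above, expressed as assertions (assuming the scripts.prepdocs import resolves in the caller's environment):

from scripts.prepdocs import clean_key_if_exists

assert clean_key_if_exists(None) is None
assert clean_key_if_exists("   ") is None            # empty after stripping
assert clean_key_if_exists(" my-key \n") == "my-key" # whitespace trimmed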
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_empty_pages
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> t = SentenceTextSplitter(has_image_embeddings=False) <del> t = SentenceTextSplitter(False, True)
# module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): <0> t = SentenceTextSplitter(False, True) <1> <2> assert list(t.split_pages([])) == [] <3>
===========unchanged ref 0=========== at: scripts.prepdocslib.textsplitter SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # Chunking is disabled when using GPT4V. To be updated in the future. if self.has_image_embeddings: for i, page in enumerate(pages): yield SplitPage(page_num=i, text=page.text) def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole</s> ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. 
# If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping - if self.verbose: - print( + logger.info( + f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" - f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>} table start {last_table_start}" + ) - ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end]) ===========changed ref 3=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 4=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 12=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 13=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ +
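The switch from SentenceTextSplitter(False, True) to a keyword argument is not cosmetic. With verbose removed, the signature presumably becomes (has_image_embeddings, max_tokens_per_section=500), so a leftover positional True would silently bind to max_tokens_per_section and change chunking behavior rather than raise an error. Keyword-only call sites avoid that hazard:

from scripts.prepdocslib.textsplitter import SentenceTextSplitter

# Robust to the signature change introduced by this refactor:
t = SentenceTextSplitter(has_image_embeddings=False)

# Brittle: after the refactor, the second positional True would mean
# max_tokens_per_section=True, not verbose=True.
# t = SentenceTextSplitter(False, True)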
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_small_pages
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> t = SentenceTextSplitter(has_image_embeddings=False) <del> t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)
# module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_small_pages(): <0> t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) <1> <2> split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")])) <3> assert len(split_pages) == 1 <4> assert split_pages[0].page_num == 0 <5> assert split_pages[0].text == "Not a large page" <6>
===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.page.SplitPage.__init__ self.page_num = page_num self.text = text at: scripts.prepdocslib.textsplitter SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # Chunking is disabled when using GPT4V. To be updated in the future. if self.has_image_embeddings: for i, page in enumerate(pages): yield SplitPage(page_num=i, text=page.text) def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole</s> ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. 
# If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping - if self.verbose: - print( + logger.info( + f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" - f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>} table start {last_table_start}" + ) - ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end]) ===========changed ref 3=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(False, True) assert list(t.split_pages([])) == [] ===========changed ref 4=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 5=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 12=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 13=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 14=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_list_parse_and_split
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> text_splitter = SentenceTextSplitter(has_image_embeddings=False) <del> text_splitter = SentenceTextSplitter(False, True) <1>:<add> pdf_parser = LocalPdfParser() <del> pdf_parser = LocalPdfParser(verbose=True) <5>:<add> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) <del> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)
# module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot): <0> text_splitter = SentenceTextSplitter(False, True) <1> pdf_parser = LocalPdfParser(verbose=True) <2> for pdf in Path("data").glob("*.pdf"): <3> shutil.copy(str(pdf.absolute()), tmp_path) <4> <5> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True) <6> files = list_file_strategy.list() <7> processed = 0 <8> results = {} <9> async for file in files: <10> pages = [page async for page in pdf_parser.parse(content=file.content)] <11> assert pages <12> sections = [ <13> Section(split_page, content=file, category="test category") <14> for split_page in text_splitter.split_pages(pages) <15> ] <16> assert sections <17> results[file.filename()] = [section.split_page.text for section in sections] <18> processed += 1 <19> assert processed > 1 <20> # Sort results by key <21> results = {k: results[k] for k in sorted(results)} <22> snapshot.assert_match(json.dumps(results, indent=2), "text_splitter_sections.txt") <23>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.tmpdir tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Generator[Path, None, None] at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: pathlib Path() at: pathlib.Path __slots__ = () glob(pattern: str) -> Generator[_P, None, None] absolute() -> _P at: scripts.prepdocslib.listfilestrategy LocalListFileStrategy(path_pattern: str) at: scripts.prepdocslib.listfilestrategy.File filename() at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy list() -> AsyncGenerator[File, None] at: scripts.prepdocslib.page.SplitPage.__init__ self.text = text at: scripts.prepdocslib.pdfparser LocalPdfParser(verbose: bool=False) at: scripts.prepdocslib.pdfparser.LocalPdfParser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page ===========unchanged ref 1=========== at: scripts.prepdocslib.textsplitter SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] at: shutil copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) -> _PathReturn ===========changed ref 0=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: - if self.verbose: + logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") - print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # Chunking is disabled when using GPT4V. To be updated in the future. 
if self.has_image_embeddings: for i, page in enumerate(pages): yield SplitPage(page_num=i, text=page.text) def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping - if self.verbose: - print( + logger.info( + f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" - f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset</s> ===========changed ref 3=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>} table start {last_table_start}" + ) - ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
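The test above drives the whole parse-then-split pipeline. A condensed sketch of the same flow outside pytest, assuming the refactored signatures shown in this diff; the file path is hypothetical:

import asyncio

from scripts.prepdocslib.pdfparser import LocalPdfParser
from scripts.prepdocslib.textsplitter import SentenceTextSplitter

async def split_one_pdf(path: str):
    parser = LocalPdfParser()
    splitter = SentenceTextSplitter(has_image_embeddings=False)
    with open(path, "rb") as f:
        # parse() is an async generator of Page objects
        pages = [page async for page in parser.parse(content=f)]
    # split_pages() is a plain (sync) generator of SplitPage chunks
    return [sp.text for sp in splitter.split_pages(pages)]

# chunks = asyncio.run(split_one_pdf("data/example.pdf"))  # hypothetical path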
tests.test_prepdocslib_textsplitter/test_simpletextsplitter_split_empty_pages
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> t = SimpleTextSplitter() <del> t = SimpleTextSplitter(True)
# module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_empty_pages(): <0> t = SimpleTextSplitter(True) <1> <2> assert list(t.split_pages([])) == [] <3>
===========unchanged ref 0=========== at: scripts.prepdocslib.textsplitter SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False) at: scripts.prepdocslib.textsplitter.SimpleTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(False, True) assert list(t.split_pages([])) == [] ===========changed ref 1=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_small_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == "Not a large page" ===========changed ref 2=========== # module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot): + text_splitter = SentenceTextSplitter(has_image_embeddings=False) - text_splitter = SentenceTextSplitter(False, True) + pdf_parser = LocalPdfParser() - pdf_parser = LocalPdfParser(verbose=True) for pdf in Path("data").glob("*.pdf"): shutil.copy(str(pdf.absolute()), tmp_path) + list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) - list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True) files = list_file_strategy.list() processed = 0 results = {} async for file in files: pages = [page async for page in pdf_parser.parse(content=file.content)] assert pages sections = [ Section(split_page, content=file, category="test category") for split_page in text_splitter.split_pages(pages) ] assert sections results[file.filename()] = [section.split_page.text for section in sections] processed += 1 assert processed > 1 # Sort results by key results = {k: results[k] for k in sorted(results)} snapshot.assert_match(json.dumps(results, indent=2), "text_splitter_sections.txt") ===========changed ref 3=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 4=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 12=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 13=========== # module: 
scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 14=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to an ADLS2 storage account + """ + ===========changed ref 17=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): self.model_id = model_id self.endpoint = endpoint self.credential = credential - self.verbose = verbose ===========changed ref 18=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 20=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) + ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the Vision embeddings API, sleeping before retrying...") - print("Rate limited on the Vision embeddings API, sleeping before retrying...") ===========changed ref 22=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def before_retry_sleep(self, retry_state): - if self.verbose: + logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
tests.test_prepdocslib_textsplitter/test_simpletextsplitter_split_small_pages
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> t = SimpleTextSplitter() <del> t = SimpleTextSplitter(verbose=True)
# module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_small_pages(): <0> t = SimpleTextSplitter(verbose=True) <1> <2> split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')])) <3> assert len(split_pages) == 1 <4> assert split_pages[0].page_num == 0 <5> assert split_pages[0].text == '{"test": "Not a large page"}' <6>
===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.page.SplitPage.__init__ self.page_num = page_num self.text = text at: scripts.prepdocslib.textsplitter SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False) at: scripts.prepdocslib.textsplitter.SimpleTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_empty_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(True) assert list(t.split_pages([])) == [] ===========changed ref 1=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(False, True) assert list(t.split_pages([])) == [] ===========changed ref 2=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_small_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == "Not a large page" ===========changed ref 3=========== # module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot): + text_splitter = SentenceTextSplitter(has_image_embeddings=False) - text_splitter = SentenceTextSplitter(False, True) + pdf_parser = LocalPdfParser() - pdf_parser = LocalPdfParser(verbose=True) for pdf in Path("data").glob("*.pdf"): shutil.copy(str(pdf.absolute()), tmp_path) + list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) - list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True) files = list_file_strategy.list() processed = 0 results = {} async for file in files: pages = [page async for page in pdf_parser.parse(content=file.content)] assert pages sections = [ Section(split_page, content=file, category="test category") for split_page in text_splitter.split_pages(pages) ] assert sections results[file.filename()] = [section.split_page.text for section in sections] processed += 1 assert processed > 1 # Sort results by key results = {k: results[k] for k in sorted(results)} snapshot.assert_match(json.dumps(results, indent=2), "text_splitter_sections.txt") ===========changed ref 4=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 5=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.htmlparser + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: 
scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 12=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 13=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 14=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 15=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 17=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to an ADLS2 storage account + """ + ===========changed ref 18=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): self.model_id = model_id self.endpoint = endpoint self.credential = credential - self.verbose = verbose ===========changed ref 19=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 20=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose ===========changed ref 21=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + def remove_file(self, filename: str, oid: str): + if filename is None or filename == "": + logging.warning("Filename is required to remove a file") + return + await self.search_manager.remove_content(filename, oid) +
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_split_pages
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<1>:<add> t = SimpleTextSplitter(max_object_length=max_object_length) <del> t = SimpleTextSplitter(max_object_length=max_object_length, verbose=True)
# module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_pages(): <0> max_object_length = 10 <1> t = SimpleTextSplitter(max_object_length=max_object_length, verbose=True) <2> <3> split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')])) <4> assert len(split_pages) == 3 <5> assert split_pages[0].page_num == 0 <6> assert split_pages[0].text == '{"test": "' <7> assert len(split_pages[0].text) <= max_object_length <8> assert split_pages[1].page_num == 1 <9> assert split_pages[1].text == "Not a larg" <10> assert len(split_pages[1].text) <= max_object_length <11> assert split_pages[2].page_num == 2 <12> assert split_pages[2].text == 'e page"}' <13> assert len(split_pages[2].text) <= max_object_length <14>
===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.page.SplitPage.__init__ self.page_num = page_num self.text = text at: scripts.prepdocslib.textsplitter SimpleTextSplitter(max_object_length: int=1000, verbose: bool=False) at: scripts.prepdocslib.textsplitter.SimpleTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_empty_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(True) assert list(t.split_pages([])) == [] ===========changed ref 1=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(False, True) assert list(t.split_pages([])) == [] ===========changed ref 2=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_small_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == '{"test": "Not a large page"}' ===========changed ref 3=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_small_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == "Not a large page" ===========changed ref 4=========== # module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_list_parse_and_split(tmp_path, snapshot): + text_splitter = SentenceTextSplitter(has_image_embeddings=False) - text_splitter = SentenceTextSplitter(False, True) + pdf_parser = LocalPdfParser() - pdf_parser = LocalPdfParser(verbose=True) for pdf in Path("data").glob("*.pdf"): shutil.copy(str(pdf.absolute()), tmp_path) + list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) - list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True) files = list_file_strategy.list() processed = 0 results = {} async for file in files: pages = [page async for page in pdf_parser.parse(content=file.content)] assert pages sections = [ Section(split_page, content=file, category="test category") for split_page in text_splitter.split_pages(pages) ] assert sections results[file.filename()] = [section.split_page.text for section in sections] processed += 1 assert processed > 1 # Sort results by key results = {k: results[k] for k in sorted(results)} snapshot.assert_match(json.dumps(results, indent=2), "text_splitter_sections.txt") ===========changed ref 5=========== # module: scripts.prepdocslib.parser class Parser(ABC): - def __init__( - self, - verbose: bool = False, - ): - self.verbose = verbose - ===========changed ref 6=========== # module: scripts.prepdocslib.pdfparser + logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: scripts.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: scripts.prepdocslib.htmlparser + logger = 
logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.prepdocslib.filestrategy + logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: scripts.prepdocslib.searchmanager + logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings + logger = logging.getLogger("ingester") ===========changed ref 12=========== # module: scripts.prepdocslib.blobmanager + logger = logging.getLogger("ingester") ===========changed ref 13=========== # module: scripts.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") ===========changed ref 14=========== # module: scripts.prepdocs - def is_key_empty(key): - return key is None or len(key.strip()) == 0 - ===========changed ref 15=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): - def __init__(self, path_pattern: str, verbose: bool = False): self.path_pattern = path_pattern - self.verbose = verbose ===========changed ref 16=========== # module: scripts.prepdocslib.textsplitter class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): - def __init__(self, max_object_length: int = 1000, verbose: bool = False): self.max_object_length = max_object_length - self.verbose = verbose ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): - def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]], verbose: bool = False): self.token_provider = token_provider self.endpoint = endpoint - self.verbose = verbose ===========changed ref 18=========== # module: scripts.prepdocslib.filestrategy + class UploadUserFileStrategy: + """ + Strategy for ingesting a file that has already been uploaded to a ADLS2 storage account + """ + ===========changed ref 19=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - model_id="prebuilt-layout", - verbose: bool = False, + self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout" ): self.model_id = model_id self.endpoint = endpoint self.credential = credential - self.verbose = verbose ===========changed ref 20=========== # module: scripts.prepdocslib.strategy class SearchInfo: - def __init__( - self, - endpoint: str, - credential: Union[AsyncTokenCredential, AzureKeyCredential], - index_name: str, - verbose: bool = False, - ): + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): self.endpoint = endpoint self.credential = credential self.index_name = index_name - self.verbose = verbose ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): self.open_ai_model_name = open_ai_model_name self.disable_batch = disable_batch - self.verbose = verbose
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_multilang
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> text_splitter = SentenceTextSplitter(has_image_embeddings=False) <del> text_splitter = SentenceTextSplitter(False, True) <6>:<add> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) <del> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True)
# module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_multilang(test_doc, tmp_path): <0> text_splitter = SentenceTextSplitter(False, True) <1> bpe = tiktoken.encoding_for_model(ENCODING_MODEL) <2> pdf_parser = LocalPdfParser() <3> <4> shutil.copy(str(test_doc.absolute()), tmp_path) <5> <6> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*"), verbose=True) <7> files = list_file_strategy.list() <8> processed = 0 <9> async for file in files: <10> pages = [page async for page in pdf_parser.parse(content=file.content)] <11> assert pages <12> sections = [ <13> Section(split_page, content=file, category="test category") <14> for split_page in text_splitter.split_pages(pages) <15> ] <16> assert sections <17> processed += 1 <18> <19> # Verify the size of the sections <20> token_lengths = [] <21> for section in sections: <22> assert len(section.split_page.text) <= (text_splitter.max_section_length * 1.2) <23> # Verify the number of tokens is below 500 <24> token_lengths.append((len(bpe.encode(section.split_page.text)), len(section.split_page.text))) <25> # verify that none of the numbers in token_lengths are above 500 <26> assert all([tok_len <= text_splitter.max_tokens_per_section for tok_len, _ in token_lengths]), ( <27> test_doc.name, <28> token_lengths, <29> ) <30> assert processed == 1 <31>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: scripts.prepdocslib.listfilestrategy LocalListFileStrategy(path_pattern: str) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy list() -> AsyncGenerator[File, None] at: scripts.prepdocslib.page.SplitPage.__init__ self.text = text at: scripts.prepdocslib.pdfparser LocalPdfParser(verbose: bool=False) at: scripts.prepdocslib.pdfparser.LocalPdfParser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page at: scripts.prepdocslib.textsplitter ENCODING_MODEL = "text-embedding-ada-002" SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__ self.max_section_length = DEFAULT_SECTION_LENGTH self.max_tokens_per_section = max_tokens_per_section at: shutil copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) -> _PathReturn at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] ===========unchanged ref 1=========== at: tiktoken.model encoding_for_model(model_name: str) -> Encoding ===========changed ref 0=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: - if self.verbose: + logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") - print(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # Chunking is disabled when using GPT4V. To be updated in the future. 
if self.has_image_embeddings: for i, page in enumerate(pages): yield SplitPage(page_num=i, text=page.text) def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping - if self.verbose: - print( + logger.info( + f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" - f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset</s> ===========changed ref 3=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>} table start {last_table_start}" + ) - ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end]) ===========changed ref 4=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_empty_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(True) assert list(t.split_pages([])) == []
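The multilang test checks both a character budget (max_section_length * 1.2) and a token budget (max_tokens_per_section, 500) per section. A standalone helper for the token side, assuming the same ENCODING_MODEL the splitter uses:

import tiktoken

def token_len(text: str) -> int:
    # ENCODING_MODEL in textsplitter is "text-embedding-ada-002"
    bpe = tiktoken.encoding_for_model("text-embedding-ada-002")
    return len(bpe.encode(text))

# Every emitted section should satisfy:
#   token_len(section.split_page.text) <= splitter.max_tokens_per_section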
tests.test_prepdocslib_textsplitter/test_split_tables
Modified
Azure-Samples~azure-search-openai-demo
87f2b9d9fd554d29f16aead4269be883e8927bb5
Refactoring of prepdocs for easier integration with user upload feature (#1407)
<0>:<add> t = SentenceTextSplitter(has_image_embeddings=False) <del> t = SentenceTextSplitter(has_image_embeddings=False, verbose=True)
# module: tests.test_prepdocslib_textsplitter def test_split_tables(): <0> t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) <1> <2> test_text_without_table = """Contoso Electronics is a leader in the aerospace industry, providing advanced electronic <3> components for both commercial and military aircraft. We specialize in creating cutting- <4> edge systems that are both reliable and efficient. Our mission is to provide the highest <5> quality aircraft components to our customers, while maintaining a commitment to safety <6> and excellence. We are proud to have built a strong reputation in the aerospace industry <7> and strive to continually improve our products and services. Our experienced team of <8> engineers and technicians are dedicated to providing the best products and services to our <9> customers. With our commitment to excellence, we are sure to remain a leader in the <10> aerospace industry for years to come. At Contoso Electronics, we strive to ensure our employees are getting the feedback they <11> need to continue growing and developing in their roles. We understand that performance <12> reviews are a key part of this process and it is important to us that they are conducted in an <13> effective and efficient manner <fable> Performance reviews are conducted annually and are an important part of your career <14> development. During the review, your supervisor will discuss your performance over the <15> past year and provide feedback on areas for improvement. They will also provide you with <16> an opportunity to discuss your goals and objectives for the upcoming year. <17> </table> <18> """ <19> test_text_with_table = test_text_without_table.replace("<fable>", "<table>") <20> <21> split_pages_with_table = list(t.split_pages(pages=[Page(page_num=0, offset=0, text=test_text_with_table)])) <22> split_pages_without_table = list(t.split_pages(pages=[Page(page_num=0, offset=0</s>
===========below chunk 0=========== # module: tests.test_prepdocslib_textsplitter def test_split_tables(): # offset: 1 assert len(split_pages_with_table) == 2 assert split_pages_with_table[0].text != split_pages_without_table[0].text # The table algorithm should move the start of the second section to include the table start # but only in the test text that has a table tag. assert "<table" in split_pages_with_table[0].text assert "<table" in split_pages_with_table[1].text assert split_pages_with_table[1].text != split_pages_without_table[1].text ===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.page.SplitPage.__init__ self.text = text at: scripts.prepdocslib.textsplitter SentenceTextSplitter(has_image_embeddings: bool, verbose: bool=False, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] ===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # Chunking is disabled when using GPT4V. To be updated in the future. if self.has_image_embeddings: for i, page in enumerate(pages): yield SplitPage(page_num=i, text=page.text) def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole</s> ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table.
# If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping - if self.verbose: - print( + logger.info( + f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" - f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>} table start {last_table_start}" + ) - ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end]) ===========changed ref 3=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_empty_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(True) assert list(t.split_pages([])) == [] ===========changed ref 4=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_empty_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(False, True) assert list(t.split_pages([])) == [] ===========changed ref 5=========== # module: tests.test_prepdocslib_textsplitter def test_simpletextsplitter_split_small_pages(): + t = SimpleTextSplitter() - t = SimpleTextSplitter(verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text='{"test": "Not a large page"}')])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == '{"test": "Not a large page"}' ===========changed ref 6=========== # module: tests.test_prepdocslib_textsplitter def test_sentencetextsplitter_split_small_pages(): + t = SentenceTextSplitter(has_image_embeddings=False) - t = SentenceTextSplitter(has_image_embeddings=False, verbose=True) split_pages = list(t.split_pages(pages=[Page(page_num=0, offset=0, text="Not a large page")])) assert len(split_pages) == 1 assert split_pages[0].page_num == 0 assert split_pages[0].text == "Not a large page"
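The rule this test exercises can be stated on its own: when a section ends inside an unclosed <table>, the next section restarts at the table, unless the table starts too close to the section edges. A simplified sketch of that decision, with names mirroring the implementation shown above:

def next_section_start(section_text: str, start: int, end: int,
                       section_overlap: int, sentence_search_limit: int) -> int:
    # Mirrors the unclosed-table rule in SentenceTextSplitter.split_pages
    last_table_start = section_text.rfind("<table")
    if (last_table_start > 2 * sentence_search_limit
            and last_table_start > section_text.rfind("</table")):
        # Unclosed table: restart the next section at the table, but never
        # advance less than the normal overlap would allow
        return min(end - section_overlap, start + last_table_start)
    return end - section_overlap

The 2 * sentence_search_limit guard is what prevents the infinite loop called out in the comment above for tables longer than a whole section.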
scripts.prepdocslib.textsplitter/SentenceTextSplitter.split_pages
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<0>:<del> # Chunking is disabled when using GPT4V. To be updated in the future. <1>:<del> if self.has_image_embeddings: <2>:<del> for i, page in enumerate(pages): <3>:<del> yield SplitPage(page_num=i, text=page.text) <4>:<del>
# module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: <0> # Chunking is disabled when using GPT4V. To be updated in the future. <1> if self.has_image_embeddings: <2> for i, page in enumerate(pages): <3> yield SplitPage(page_num=i, text=page.text) <4> <5> def find_page(offset): <6> num_pages = len(pages) <7> for i in range(num_pages - 1): <8> if offset >= pages[i].offset and offset < pages[i + 1].offset: <9> return pages[i].page_num <10> return pages[num_pages - 1].page_num <11> <12> all_text = "".join(page.text for page in pages) <13> if len(all_text.strip()) == 0: <14> return <15> <16> length = len(all_text) <17> if length <= self.max_section_length: <18> yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) <19> return <20> <21> start = 0 <22> end = length <23> while start + self.section_overlap < length: <24> last_word = -1 <25> end = start + self.max_section_length <26> <27> if end > length: <28> end = length <29> else: <30> # Try to find the end of the sentence <31> while ( <32> end < length <33> and (end - start - self.max_section_length) < self.sentence_search_limit <34> and all_text[end] not in self.sentence_endings <35> ): <36> if all_text[end] in self.word_breaks: <37> last_word = end <38> end += 1 <39> if end < length and all_text[end] not in self.sentence_endings and last_word > 0: <40> end = last_</s>
===========below chunk 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping logger.info( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(</s> ===========below chunk 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s>section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end]) ===========unchanged ref 0=========== at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) SplitPage(page_num: int, text: str) at: scripts.prepdocslib.page.Page.__init__ self.page_num = page_num self.offset = offset self.text = text at: scripts.prepdocslib.textsplitter logger = logging.getLogger("ingester") at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_page_by_max_tokens(page_num: int, text: str) -> Generator[SplitPage, None, None] at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__ self.sentence_endings = STANDARD_SENTENCE_ENDINGS + CJK_SENTENCE_ENDINGS self.word_breaks = STANDARD_WORD_BREAKS + CJK_WORD_BREAKS self.max_section_length = DEFAULT_SECTION_LENGTH self.sentence_search_limit = 100 self.section_overlap = self.max_section_length // DEFAULT_OVERLAP_PERCENT self.has_image_embeddings = has_image_embeddings at: scripts.prepdocslib.textsplitter.TextSplitter split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None] at: typing List = _alias(list, 1, inst=False, name='List') Generator = _alias(collections.abc.Generator, 3)
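With the GPT-4V bypass removed by this commit, split_pages chunks long text regardless of has_image_embeddings. A quick check under that assumption; the repeated-word text is purely illustrative, and the chunk count depends on the default section length, which is not shown in this record:

from scripts.prepdocslib.page import Page
from scripts.prepdocslib.textsplitter import SentenceTextSplitter

splitter = SentenceTextSplitter(has_image_embeddings=True)
pages = [Page(page_num=0, offset=0, text="word " * 2000)]  # ~10k characters
chunks = list(splitter.split_pages(pages))
assert len(chunks) > 1  # long text is now chunked on the GPT-4V path too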
scripts.prepdocslib.htmlparser/LocalHTMLParser.parse
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<7>:<add> logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) <del> logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)")
# module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> """Parses the given content. <1> To learn more, please visit https://pypi.org/project/beautifulsoup4/ <2> Args: <3> content (IO): The content to parse. <4> Returns: <5> Page: The parsed html Page. <6> """ <7> logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") <8> <9> data = content.read() <10> soup = BeautifulSoup(data, "html.parser") <11> <12> # Get text only from html file <13> result = soup.get_text() <14> <15> yield Page(0, 0, text=cleanup_data(result)) <16>
===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: - # Chunking is disabled when using GPT4V. To be updated in the future. - if self.has_image_embeddings: - for i, page in enumerate(pages): - yield SplitPage(page_num=i, text=page.text) - def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence</s> ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping logger.info( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s> if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
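A minimal way to exercise LocalHTMLParser end to end, assuming the signatures shown above; the file path is hypothetical:

import asyncio

from scripts.prepdocslib.htmlparser import LocalHTMLParser

async def html_to_text(path: str) -> str:
    parser = LocalHTMLParser()
    with open(path, "rb") as f:
        pages = [page async for page in parser.parse(content=f)]
    return pages[0].text  # parse() yields a single Page for an HTML file

# text = asyncio.run(html_to_text("docs/example.html"))  # hypothetical path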
scripts.prepdocslib.listfilestrategy/LocalListFileStrategy.check_md5
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<14>:<add> logger.info("Skipping %s, no changes detected.", path) <del> logger.info(f"Skipping {path}, no changes detected.")
# module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: <0> # if filename ends in .md5 skip <1> if path.endswith(".md5"): <2> return True <3> <4> # if there is a file called .md5 in this directory, see if it's updated <5> stored_hash = None <6> with open(path, "rb") as file: <7> existing_hash = hashlib.md5(file.read()).hexdigest() <8> hash_path = f"{path}.md5" <9> if os.path.exists(hash_path): <10> with open(hash_path, encoding="utf-8") as md5_f: <11> stored_hash = md5_f.read() <12> <13> if stored_hash and stored_hash.strip() == existing_hash.strip(): <14> logger.info(f"Skipping {path}, no changes detected.") <15> return True <16> <17> # Write the hash <18> with open(hash_path, "w", encoding="utf-8") as md5_f: <19> md5_f.write(existing_hash) <20> <21> return False <22>
===========unchanged ref 0=========== at: hashlib md5(string: ReadableBuffer=...) -> _Hash at: hashlib._Hash digest_size: int block_size: int name: str hexdigest() -> str at: io.BufferedRandom read(self, size: Optional[int]=..., /) -> bytes at: io.BufferedReader read(self, size: Optional[int]=..., /) -> bytes at: io.FileIO write(self, b: ReadableBuffer, /) -> int at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: os.path exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool at: scripts.prepdocslib.listfilestrategy logger = logging.getLogger("ingester") at: typing.BinaryIO __slots__ = () write(s: AnyStr) -> int at: typing.IO __slots__ = () read(n: int=...) -> AnyStr write(s: AnyStr) -> int ===========changed ref 0=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: - # Chunking is disabled when using GPT4V. To be updated in the future. 
- if self.has_image_embeddings: - for i, page in enumerate(pages): - yield SplitPage(page_num=i, text=page.text) - def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence</s> ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping logger.info( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section</s> ===========changed ref 3=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s> if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
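A pattern repeated throughout this commit is converting f-string log calls to %-style arguments, as in the check_md5 change above. With an f-string the message is built eagerly even when the log level filters it out; with %-style arguments the logging module defers interpolation until a handler actually emits the record. A quick sketch:

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("ingester")

path = "data/report.pdf"
# Eager: the f-string is formatted even though INFO is below the WARNING threshold
logger.info(f"Skipping {path}, no changes detected.")
# Lazy: 'path' is only formatted into the message if the record is emitted
logger.info("Skipping %s, no changes detected.", path)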
scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_batch
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<13>:<add> logger.info( <add> "Computed embeddings in batch. Batch size: %d, Token count: %d", <add> len(batch.texts), <add> batch.token_length, <add> ) <del> logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}")
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: <0> batches = self.split_text_into_batches(texts) <1> embeddings = [] <2> client = await self.create_client() <3> for batch in batches: <4> async for attempt in AsyncRetrying( <5> retry=retry_if_exception_type(RateLimitError), <6> wait=wait_random_exponential(min=15, max=60), <7> stop=stop_after_attempt(15), <8> before_sleep=self.before_retry_sleep, <9> ): <10> with attempt: <11> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) <12> embeddings.extend([data.embedding for data in emb_response.data]) <13> logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") <14> <15> return embeddings <16>
===========changed ref 0=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 1=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 2=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: - # Chunking is disabled when using GPT4V. To be updated in the future. 
- if self.has_image_embeddings: - for i, page in enumerate(pages): - yield SplitPage(page_num=i, text=page.text) - def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence</s> ===========changed ref 3=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping logger.info( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section</s> ===========changed ref 4=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s> if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
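The retry loop around the embeddings request uses tenacity's AsyncRetrying iterator. A minimal sketch of that pattern in isolation, with a hypothetical flaky_call standing in for the OpenAI client:

import asyncio

from openai import RateLimitError
from tenacity import AsyncRetrying, retry_if_exception_type, stop_after_attempt, wait_random_exponential

async def flaky_call() -> str:
    return "ok"  # imagine this sometimes raises RateLimitError

async def main() -> None:
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(RateLimitError),  # retry only on rate limits
        wait=wait_random_exponential(min=15, max=60),   # jittered exponential backoff
        stop=stop_after_attempt(15),                    # give up after 15 attempts
    ):
        with attempt:  # an exception raised in this block marks the attempt as failed
            result = await flaky_call()
    print(result)

asyncio.run(main())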
scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_single
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<9>:<add> logger.info("Computed embedding for text section. Character count: %d", len(text))
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: <0> client = await self.create_client() <1> async for attempt in AsyncRetrying( <2> retry=retry_if_exception_type(RateLimitError), <3> wait=wait_random_exponential(min=15, max=60), <4> stop=stop_after_attempt(15), <5> before_sleep=self.before_retry_sleep, <6> ): <7> with attempt: <8> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) <9> <10> return emb_response.data[0].embedding <11>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 2=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 3=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: - # Chunking is disabled when using GPT4V. To be updated in the future. 
- if self.has_image_embeddings: - for i, page in enumerate(pages): - yield SplitPage(page_num=i, text=page.text) - def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence</s> ===========changed ref 4=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield from self.split_page_by_max_tokens(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping logger.info( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section</s> ===========changed ref 5=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 2 <s> if start + self.section_overlap < end: yield from self.split_page_by_max_tokens(page_num=find_page(start), text=all_text[start:end])
scripts.prepdocslib.blobmanager/BlobManager.upload_blob
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<9>:<add> logger.info("Uploading blob for whole file -> %s", blob_name) <del> logger.info(f"\tUploading blob for whole file -> {blob_name}") <12>:<add> if self.store_page_images: <add> if os.path.splitext(file.content.name)[1].lower() == ".pdf": <del> if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": <13>:<add> return await self.upload_pdf_blob_images(service_client, container_client, file) <del> return await self.upload_pdf_blob_images(service_client, container_client, file) <14>:<add> else: <add> logger.info("File %s is not a PDF, skipping image upload", file.content.name)
# module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> await container_client.create_container() <5> <6> # Re-open and upload the original file <7> with open(file.content.name, "rb") as reopened_file: <8> blob_name = BlobManager.blob_name_from_file_name(file.content.name) <9> logger.info(f"\tUploading blob for whole file -> {blob_name}") <10> await container_client.upload_blob(blob_name, reopened_file, overwrite=True) <11> <12> if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": <13> return await self.upload_pdf_blob_images(service_client, container_client, file) <14> <15> return None <16>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 3=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 4=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: - # Chunking is disabled when using GPT4V. To be updated in the future. 
- if self.has_image_embeddings: - for i, page in enumerate(pages): - yield SplitPage(page_num=i, text=page.text) - def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) if len(all_text.strip()) == 0: return length = len(all_text) if length <= self.max_section_length: yield from self.split_page_by_max_tokens(page_num=find_page(0), text=all_text) return start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence</s>
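The restructured condition in upload_blob makes the non-PDF case explicit so a skip can be logged. os.path.splitext returns the extension with its leading dot and original casing, hence the .lower() comparison:

import os.path

for name in ["report.pdf", "slides.PDF", "notes.txt"]:
    ext = os.path.splitext(name)[1].lower()
    print(name, "->", "upload page images" if ext == ".pdf" else "skip image upload")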
scripts.prepdocslib.blobmanager/BlobManager.upload_pdf_blob_images
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<15>:<add> logger.info("Unable to find arial.ttf or FreeMono.ttf, using default font") <del> logger.info("\tUnable to find arial.ttf or FreeMono.ttf, using default font") <19>:<add> logger.info("Converting page %s to image and uploading -> %s", i, blob_name) <del> logger.info(f"\tConverting page {i} to image and uploading -> {blob_name}")
# module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: <0> with open(file.content.name, "rb") as reopened_file: <1> reader = PdfReader(reopened_file) <2> page_count = len(reader.pages) <3> doc = fitz.open(file.content.name) <4> sas_uris = [] <5> start_time = datetime.datetime.now(datetime.timezone.utc) <6> expiry_time = start_time + datetime.timedelta(days=1) <7> <8> font = None <9> try: <10> font = ImageFont.truetype("arial.ttf", 20) <11> except OSError: <12> try: <13> font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20) <14> except OSError: <15> logger.info("\tUnable to find arial.ttf or FreeMono.ttf, using default font") <16> <17> for i in range(page_count): <18> blob_name = BlobManager.blob_image_name_from_file_page(file.content.name, i) <19> logger.info(f"\tConverting page {i} to image and uploading -> {blob_name}") <20> <21> doc = fitz.open(file.content.name) <22> page = doc.load_page(i) <23> pix = page.get_pixmap() <24> original_img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) # type: ignore <25> <26> # Create a new image with additional space for text <27> text_height = 40 # Height of the text area <28> new_img = Image.new("RGB", (original_img.width, original_img.height + text_height), "white") <29> <30> # Paste the original image onto the new image <31> new_img.paste(</s>
===========below chunk 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: # offset: 1 # Draw the text on the white area draw = ImageDraw.Draw(new_img) text = f"SourceFileName:{blob_name}" # 10 pixels from the top and left of the image x = 10 y = 10 draw.text((x, y), text, font=font, fill="black") output = io.BytesIO() new_img.save(output, format="PNG") output.seek(0) blob_client = await container_client.upload_blob(blob_name, output, overwrite=True) if not self.user_delegation_key: self.user_delegation_key = await service_client.get_user_delegation_key(start_time, expiry_time) if blob_client.account_name is not None: sas_token = generate_blob_sas( account_name=blob_client.account_name, container_name=blob_client.container_name, blob_name=blob_client.blob_name, user_delegation_key=self.user_delegation_key, permission=BlobSasPermissions(read=True), expiry=expiry_time, start=start_time, ) sas_uris.append(f"{blob_client.url}?{sas_token}") return sas_uris ===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: async with BlobServiceClient( account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() # Re-open and upload the original file with open(file.content.name, "rb") as reopened_file: blob_name = BlobManager.blob_name_from_file_name(file.content.name) + logger.info("Uploading blob for whole file -> %s", blob_name) - logger.info(f"\tUploading blob for whole file -> {blob_name}") await container_client.upload_blob(blob_name, reopened_file, overwrite=True) + if self.store_page_images: + if os.path.splitext(file.content.name)[1].lower() == ".pdf": - if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": + return await self.upload_pdf_blob_images(service_client, container_client, file) - return await self.upload_pdf_blob_images(service_client, container_client, file) + else: + logger.info("File %s is not a PDF, skipping image upload", file.content.name) return None ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 2=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. 
""" + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 4=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False
scripts.prepdocslib.blobmanager/BlobManager.remove_blob
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<20>:<add> logger.info("Removing blob %s", blob_path) <del> logger.info(f"\tRemoving blob {blob_path}")
# module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> return <5> if path is None: <6> prefix = None <7> blobs = container_client.list_blob_names() <8> else: <9> prefix = os.path.splitext(os.path.basename(path))[0] <10> blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) <11> async for blob_path in blobs: <12> # This still supports PDFs split into individual pages, but we could remove in future to simplify code <13> if ( <14> prefix is not None <15> and ( <16> not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) <17> ) <18> ) or (path is not None and blob_path == os.path.basename(path)): <19> continue <20> logger.info(f"\tRemoving blob {blob_path}") <21> await container_client.delete_blob(blob_path) <22>
===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: async with BlobServiceClient( account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() # Re-open and upload the original file with open(file.content.name, "rb") as reopened_file: blob_name = BlobManager.blob_name_from_file_name(file.content.name) + logger.info("Uploading blob for whole file -> %s", blob_name) - logger.info(f"\tUploading blob for whole file -> {blob_name}") await container_client.upload_blob(blob_name, reopened_file, overwrite=True) + if self.store_page_images: + if os.path.splitext(file.content.name)[1].lower() == ".pdf": - if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": + return await self.upload_pdf_blob_images(service_client, container_client, file) - return await self.upload_pdf_blob_images(service_client, container_client, file) + else: + logger.info("File %s is not a PDF, skipping image upload", file.content.name) return None ===========changed ref 1=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: with open(file.content.name, "rb") as reopened_file: reader = PdfReader(reopened_file) page_count = len(reader.pages) doc = fitz.open(file.content.name) sas_uris = [] start_time = datetime.datetime.now(datetime.timezone.utc) expiry_time = start_time + datetime.timedelta(days=1) font = None try: font = ImageFont.truetype("arial.ttf", 20) except OSError: try: font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20) except OSError: + logger.info("Unable to find arial.ttf or FreeMono.ttf, using default font") - logger.info("\tUnable to find arial.ttf or FreeMono.ttf, using default font") for i in range(page_count): blob_name = BlobManager.blob_image_name_from_file_page(file.content.name, i) + logger.info("Converting page %s to image and uploading -> %s", i, blob_name) - logger.info(f"\tConverting page {i} to image and uploading -> {blob_name}") doc = fitz.open(file.content.name) page = doc.load_page(i) pix = page.get_pixmap() original_img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) # type: ignore # Create a new image with additional space for text text_height = 40 # Height of the text area new_img = Image.new("RGB", (original_img.width, original_img.height + text_height), "</s> ===========changed ref 2=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: # offset: 1 <s> new_img = Image.new("RGB", (original_img.width, original_img.height + text_height), "white") # Paste the original image onto the new image new_img.paste(original_img, (0, text_height)) # Draw the text on the white area draw = ImageDraw.Draw(new_img) text = f"SourceFileName:{blob_name}" # 10 pixels from the top and left of the image x = 10 y = 10 draw.text((x, y), text, font=font, fill="black") output = io.BytesIO() new_img.save(output, format="PNG") output.seek(0) blob_client = await container_client.upload_blob(blob_name, output, overwrite=True) if not 
self.user_delegation_key: self.user_delegation_key = await service_client.get_user_delegation_key(start_time, expiry_time) if blob_client.account_name is not None: sas_token = generate_blob_sas( account_name=blob_client.account_name, container_name=blob_client.container_name, blob_name=blob_client.blob_name, user_delegation_key=self.user_delegation_key, permission=BlobSasPermissions(read=True), expiry=expiry_time, start=start_time, ) sas_uris.append(f"{blob_client.url}?{sas_token}") return sas_uris ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 4=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result))
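The cleanup patterns in remove_blob rely on re.match anchoring at the beginning of the blob name. A quick demonstration with a Northwind prefix; note that no single name can match both the .pdf and the .png pattern, so the combined not/or test in the loop above evaluates true for every candidate name:

import re

prefix = "Northwind"
for blob_path in ["Northwind-3.pdf", "Northwind-3.png", "Northwind.pdf", "Annual-1.pdf"]:
    is_page_pdf = bool(re.match(rf"{prefix}-\d+\.pdf", blob_path))
    is_page_png = bool(re.match(rf"{prefix}-\d+\.png", blob_path))
    print(blob_path, is_page_pdf, is_page_png)
# Northwind-3.pdf True False
# Northwind-3.png False True
# Northwind.pdf False False
# Annual-1.pdf False False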
scripts.prepdocslib.searchmanager/SearchManager.create_index
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<0>:<add> logger.info("Ensuring search index %s exists", self.search_info.index_name) <del> logger.info(f"Ensuring search index {self.search_info.index_name} exists")
# module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): <0> logger.info(f"Ensuring search index {self.search_info.index_name} exists") <1> <2> async with self.search_info.create_search_index_client() as search_index_client: <3> fields = [ <4> ( <5> SimpleField(name="id", type="Edm.String", key=True) <6> if not self.use_int_vectorization <7> else SearchField( <8> name="id", <9> type="Edm.String", <10> key=True, <11> sortable=True, <12> filterable=True, <13> facetable=True, <14> analyzer_name="keyword", <15> ) <16> ), <17> SearchableField( <18> name="content", <19> type="Edm.String", <20> analyzer_name=self.search_analyzer_name, <21> ), <22> SearchField( <23> name="embedding", <24> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <25> hidden=False, <26> searchable=True, <27> filterable=False, <28> sortable=False, <29> facetable=False, <30> vector_search_dimensions=1536, <31> vector_search_profile_name="embedding_config", <32> ), <33> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <34> SimpleField( <35> name="sourcepage", <36> type="Edm.String", <37> filterable=True, <38> facetable=True, <39> ), <40> SimpleField( <41> name="sourcefile", <42> type="Edm.String", <43> filterable=True, <44> facetable=True, <45> ), <46> ] <47> if self.use_acls: <48> fields.append( <49> SimpleField( <50> name="oids", <51> type=Search</s>
===========below chunk 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 1 filterable=True, ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) if self.use_int_vectorization: fields.append(SearchableField(name="parent_id", type="Edm.String", filterable=True)) if self.search_images: fields.append( SearchField( name="imageEmbedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1024, vector_search_profile_name="embedding_config", ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_search=SemanticSearch( configurations=[ SemanticConfiguration( name="default", prioritized_fields=SemanticPrioritizedFields( title_field=None, content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswAlgorithmConfiguration( name="hnsw_config", parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm_configuration_name="hnsw_config", vectorizer=( f"{self.search_info.index_name}-vectorizer" if self.use_int_vectorization else None ), ), ], vectorizers=vectorizers, ), ) if self.search_info.index_</s> ===========below chunk 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 2 <s> ], vectorizers=vectorizers, ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: logger.info(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: logger.info(f"Search index {self.search_info.index_name} already exists") ===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.searchmanager logger = logging.getLogger("ingester") at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.search_images = search_images at: scripts.prepdocslib.strategy.SearchInfo create_search_index_client() -> SearchIndexClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. 
Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 3=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False
scripts.prepdocslib.searchmanager/SearchManager.remove_content
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<0>:<add> logger.info( <add> "Removing sections from '%s' from search index '%s'", path or "<all>", self.search_info.index_name <del> logger.info(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'")
<1>:<add> )
<13>:<add> logger.info("Removed %d sections from index", len(removed_docs)) <del> logger.info(f"\tRemoved {len(removed_docs)} sections from index")
# module: scripts.prepdocslib.searchmanager class SearchManager: def remove_content(self, path: Optional[str] = None, only_oid: Optional[str] = None): <0> logger.info(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'") <1> async with self.search_info.create_search_client() as search_client: <2> while True: <3> filter = None if path is None else f"sourcefile eq '{os.path.basename(path)}'" <4> result = await search_client.search("", filter=filter, top=1000, include_total_count=True) <5> if await result.get_count() == 0: <6> break <7> documents_to_remove = [] <8> async for document in result: <9> # If only_oid is set, only remove documents that have only this oid <10> if not only_oid or document["oids"] == [only_oid]: <11> documents_to_remove.append({"id": document["id"]}) <12> removed_docs = await search_client.delete_documents(documents_to_remove) <13> logger.info(f"\tRemoved {len(removed_docs)} sections from index") <14> # It can take a few seconds for search results to reflect changes, so wait a bit <15> await asyncio.sleep(2) <16>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.searchmanager logger = logging.getLogger("ingester") at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info at: scripts.prepdocslib.strategy.SearchInfo create_search_client() -> SearchClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name ===========changed ref 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): + logger.info("Ensuring search index %s exists", self.search_info.index_name) - logger.info(f"Ensuring search index {self.search_info.index_name} exists") async with self.search_info.create_search_index_client() as search_index_client: fields = [ ( SimpleField(name="id", type="Edm.String", key=True) if not self.use_int_vectorization else SearchField( name="id", type="Edm.String", key=True, sortable=True, filterable=True, facetable=True, analyzer_name="keyword", ) ), SearchableField( name="content", type="Edm.String", analyzer_name=self.search_analyzer_name, ), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_profile_name="embedding_config", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField( name="sourcepage", type="Edm.String", filterable=True, facetable=True, ), SimpleField( name="sourcefile", type="Edm.String", filterable=True, facetable=True, ), ] if self.use_acls: fields.append( SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) </s> ===========changed ref 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 1 <s> type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) if self.use_int_vectorization: fields.append(SearchableField(name="parent_id", type="Edm.String", filterable=True)) if self.search_images: fields.append( SearchField( name="imageEmbedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1024, vector_search_profile_name="embedding_config", ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_search=SemanticSearch( configurations=[ SemanticConfiguration( name="default", prioritized_fields=SemanticPrioritizedFields( title_field=None, content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswAlgorithmConfiguration( name="hnsw_config", parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm_configuration_name="hnsw_config", vectorizer=( f"{self.search_info.index_name}-vectorizer</s> ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def 
create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 2 <s> self.use_int_vectorization else None ), ), ], vectorizers=vectorizers, ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: + logger.info("Creating %s search index", self.search_info.index_name) - logger.info(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: + logger.info("Search index %s already exists", self.search_info.index_name) - logger.info(f"Search index {self.search_info.index_name} already exists") ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 4=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result))
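remove_content builds its OData filter with a plain f-string, so a file name containing a single quote would produce an invalid expression. In OData, a quote inside a string literal is escaped by doubling it; a hedged helper (not present in the original code) could look like:

import os.path

def build_sourcefile_filter(path: str) -> str:
    name = os.path.basename(path).replace("'", "''")  # OData escapes ' as ''
    return f"sourcefile eq '{name}'"

print(build_sourcefile_filter("data/O'Brien report.pdf"))
# sourcefile eq 'O''Brien report.pdf'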
scripts.prepdocslib.pdfparser/LocalPdfParser.parse
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<0>:<add> logger.info("Extracting text from '%s' using local PDF parser (pypdf)", content.name) <del> logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)")
# module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") <1> <2> reader = PdfReader(content) <3> pages = reader.pages <4> offset = 0 <5> for page_num, p in enumerate(pages): <6> page_text = p.extract_text() <7> yield Page(page_num=page_num, offset=offset, text=page_text) <8> offset += len(page_text) <9>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.parser.Parser parse(self, content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.pdfparser logger = logging.getLogger("ingester") at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) IO() at: typing.IO __slots__ = () ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 1=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 3=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 4=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue + logger.info("Removing blob %s", blob_path) - logger.info(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path) ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def remove_content(self, path: Optional[str] = None, only_oid: Optional[str] = None): + logger.info( + "Removing sections from '%s' from search index '%s'", path or "<all>", self.search_info.index_name - logger.info(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'") + ) async with self.search_info.create_search_client() as search_client: while True: filter = None if path is None else f"sourcefile eq '{os.path.basename(path)}'" result = await search_client.search("", filter=filter, top=1000, include_total_count=True) if await result.get_count() == 0: break documents_to_remove = [] async for document in result: # If only_oid is set, only remove documents that have only this oid if not only_oid or document["oids"] == [only_oid]: documents_to_remove.append({"id": document["id"]}) removed_docs = await search_client.delete_documents(documents_to_remove) + logger.info("Removed %d sections from index", len(removed_docs)) - logger.info(f"\tRemoved {len(removed_docs)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit await asyncio.sleep(2)
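Both PDF parsers emit pages with a running character offset into the concatenated document text; the splitter's find_page later inverts that mapping. A tiny sketch of the lookup:

# (page_num, offset) pairs as a parser would produce them
pages = [(0, 0), (1, 120), (2, 305)]

def find_page(offset: int) -> int:
    for i in range(len(pages) - 1):
        if pages[i][1] <= offset < pages[i + 1][1]:
            return pages[i][0]
    return pages[-1][0]  # anything past the last offset is on the last page

print(find_page(0), find_page(150), find_page(400))  # 0 1 2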
scripts.prepdocslib.pdfparser/DocumentAnalysisParser.parse
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<0>:<add> logger.info("Extracting text from '%s' using Azure Document Intelligence", content.name) <del> logger.info(f"Extracting text from '{content.name}' using Azure Document Intelligence")
# module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> logger.info(f"Extracting text from '{content.name}' using Azure Document Intelligence") <1> <2> async with DocumentIntelligenceClient( <3> endpoint=self.endpoint, credential=self.credential <4> ) as document_intelligence_client: <5> poller = await document_intelligence_client.begin_analyze_document( <6> model_id=self.model_id, analyze_request=content, content_type="application/octet-stream" <7> ) <8> form_recognizer_results = await poller.result() <9> <10> offset = 0 <11> for page_num, page in enumerate(form_recognizer_results.pages): <12> tables_on_page = [ <13> table <14> for table in (form_recognizer_results.tables or []) <15> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 <16> ] <17> <18> # mark all positions of the table spans in the page <19> page_offset = page.spans[0].offset <20> page_length = page.spans[0].length <21> table_chars = [-1] * page_length <22> for table_id, table in enumerate(tables_on_page): <23> for span in table.spans: <24> # replace all table spans with "table_id" in table_chars array <25> for i in range(span.length): <26> idx = span.offset - page_offset + i <27> if idx >= 0 and idx < page_length: <28> table_chars[idx] = table_id <29> <30> # build page text by replacing characters in table spans with table html <31> page_text = "" <32> added_tables = set() <33> for idx, table_id in enumerate(table_chars): <34> if table_id == -1: <35> page_text += form_recognizer_results.content[page_</s>
===========below chunk 0=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: # offset: 1 elif table_id not in added_tables: page_text += DocumentAnalysisParser.table_to_html(tables_on_page[table_id]) added_tables.add(table_id) yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.page Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.parser.Parser parse(self, content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.pdfparser logger = logging.getLogger("ingester") DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout") at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser table_to_html(table: DocumentTable) at: scripts.prepdocslib.pdfparser.DocumentAnalysisParser.__init__ self.model_id = model_id self.endpoint = endpoint self.credential = credential at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) IO() ===========changed ref 0=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: + logger.info("Extracting text from '%s' using local PDF parser (pypdf)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 2=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. 
""" + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 4=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue + logger.info("Removing blob %s", blob_path) - logger.info(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path)
scripts.prepdocs/setup_list_file_strategy
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<5>:<add> logger.info("Using Data Lake Gen2 Storage Account: %s", datalake_storage_account) <del> logger.info(f"Using Data Lake Gen2 Storage Account {datalake_storage_account}") <13>:<add> logger.info("Using local files: %s", local_files) <del> logger.info(f"Using local files in {local_files}")
# module: scripts.prepdocs def setup_list_file_strategy( azure_credential: AsyncTokenCredential, local_files: Union[str, None], datalake_storage_account: Union[str, None], datalake_filesystem: Union[str, None], datalake_path: Union[str, None], datalake_key: Union[str, None], ): <0> list_file_strategy: ListFileStrategy <1> if datalake_storage_account: <2> if datalake_filesystem is None or datalake_path is None: <3> raise ValueError("DataLake file system and path are required when using Azure Data Lake Gen2") <4> adls_gen2_creds: Union[AsyncTokenCredential, str] = azure_credential if datalake_key is None else datalake_key <5> logger.info(f"Using Data Lake Gen2 Storage Account {datalake_storage_account}") <6> list_file_strategy = ADLSGen2ListFileStrategy( <7> data_lake_storage_account=datalake_storage_account, <8> data_lake_filesystem=datalake_filesystem, <9> data_lake_path=datalake_path, <10> credential=adls_gen2_creds, <11> ) <12> elif local_files: <13> logger.info(f"Using local files in {local_files}") <14> list_file_strategy = LocalListFileStrategy(path_pattern=local_files) <15> else: <16> raise ValueError("Either local_files or datalake_storage_account must be provided.") <17> return list_file_strategy <18>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: prepdocslib.listfilestrategy ListFileStrategy() LocalListFileStrategy(path_pattern: str) ADLSGen2ListFileStrategy(data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str]) at: scripts.prepdocs logger = logging.getLogger("ingester") ===========changed ref 0=========== + # module: tests.test_fetch_image + + ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: + logger.info("Extracting text from '%s' using local PDF parser (pypdf)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. 
Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 5=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue + logger.info("Removing blob %s", blob_path) - logger.info(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path)
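For orientation, here is a hypothetical local-files invocation of setup_list_file_strategy. The credential is only exercised on the Data Lake branch but the parameter is still required; the import path is an assumption based on this repo's layout.

import asyncio

from azure.identity.aio import DefaultAzureCredential

from scripts.prepdocs import setup_list_file_strategy  # assumed module path


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        strategy = setup_list_file_strategy(
            azure_credential=credential,
            local_files="./data/*",  # glob consumed by LocalListFileStrategy
            datalake_storage_account=None,
            datalake_filesystem=None,
            datalake_path=None,
            datalake_key=None,
        )
        async for file in strategy.list():
            print(file.filename())
            file.close()


asyncio.run(main())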
scripts.prepdocslib.filestrategy/parse_file
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<3>:<add> logger.info("Skipping '%s', no parser found.", file.filename()) <del> logger.info(f"Skipping '{file.filename()}', no parser found.") <5>:<add> logger.info("Ingesting '%s'", file.filename()) <del> logger.info(f"Parsing '{file.filename()}'") <7>:<add> logger.info("Splitting '%s' into sections", file.filename()) <add> if image_embeddings: <add> logger.warning("Each page will be split into smaller chunks of text, but images will be of the entire page.") <del> logger.info(f"Splitting '{file.filename()}' into sections")
# module: scripts.prepdocslib.filestrategy def parse_file( + file: File, + file_processors: dict[str, FileProcessor], + category: Optional[str] = None, + image_embeddings: Optional[ImageEmbeddings] = None, - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None ) -> List[Section]: <0> key = file.file_extension() <1> processor = file_processors.get(key) <2> if processor is None: <3> logger.info(f"Skipping '{file.filename()}', no parser found.") <4> return [] <5> logger.info(f"Parsing '{file.filename()}'") <6> pages = [page async for page in processor.parser.parse(content=file.content)] <7> logger.info(f"Splitting '{file.filename()}' into sections") <8> sections = [ <9> Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) <10> ] <11> return sections <12>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.embeddings ImageEmbeddings(endpoint: str, token_provider: Callable[[], Awaitable[str]]) at: scripts.prepdocslib.fileprocessor FileProcessor(parser: Parser, splitter: TextSplitter) at: scripts.prepdocslib.fileprocessor.FileProcessor parser: Parser splitter: TextSplitter at: scripts.prepdocslib.filestrategy logger = logging.getLogger("ingester") at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File filename() file_extension() at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: scripts.prepdocslib.parser.Parser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: typing List = _alias(list, 1, inst=False, name='List') at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: tests.test_fetch_image + + ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: + logger.info("Extracting text from '%s' using local PDF parser (pypdf)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. 
""" + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 5=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue + logger.info("Removing blob %s", blob_path) - logger.info(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path)
scripts.prepdocslib.filestrategy/FileStrategy.run
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<7>:<add> sections = await parse_file(file, self.file_processors, self.category, self.image_embeddings) <del> sections = await parse_file(file, self.file_processors, self.category)
# module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): def run(self): <0> search_manager = SearchManager( <1> self.search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings <2> ) <3> if self.document_action == DocumentAction.Add: <4> files = self.list_file_strategy.list() <5> async for file in files: <6> try: <7> sections = await parse_file(file, self.file_processors, self.category) <8> if sections: <9> blob_sas_uris = await self.blob_manager.upload_blob(file) <10> blob_image_embeddings: Optional[List[List[float]]] = None <11> if self.image_embeddings and blob_sas_uris: <12> blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <13> await search_manager.update_content(sections, blob_image_embeddings) <14> finally: <15> if file: <16> file.close() <17> elif self.document_action == DocumentAction.Remove: <18> paths = self.list_file_strategy.list_paths() <19> async for path in paths: <20> await self.blob_manager.remove_blob(path) <21> await search_manager.remove_content(path) <22> elif self.document_action == DocumentAction.RemoveAll: <23> await self.blob_manager.remove_blob() <24> await search_manager.remove_content() <25>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager.BlobManager upload_blob(file: File) -> Optional[List[str]] at: scripts.prepdocslib.embeddings.ImageEmbeddings create_embeddings(blob_urls: List[str]) -> List[List[float]] at: scripts.prepdocslib.filestrategy parse_file(file: File, file_processors: dict[str, FileProcessor], category: Optional[str]=None, image_embeddings: Optional[ImageEmbeddings]=None) -> List[Section] at: scripts.prepdocslib.filestrategy.FileStrategy.__init__ self.list_file_strategy = list_file_strategy self.blob_manager = blob_manager self.file_processors = file_processors self.document_action = document_action self.embeddings = embeddings self.image_embeddings = image_embeddings self.search_analyzer_name = search_analyzer_name self.search_info = search_info self.use_acls = use_acls self.category = category at: scripts.prepdocslib.filestrategy.FileStrategy.setup search_manager = SearchManager( self.search_info, self.search_analyzer_name, self.use_acls, False, self.embeddings, search_images=self.image_embeddings is not None, ) at: scripts.prepdocslib.listfilestrategy.ListFileStrategy list() -> AsyncGenerator[File, None] list_paths() -> AsyncGenerator[str, None] at: scripts.prepdocslib.searchmanager SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False) ===========unchanged ref 1=========== at: scripts.prepdocslib.searchmanager.SearchManager create_index(vectorizers: Optional[List[VectorSearchVectorizer]]=None) update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None) at: scripts.prepdocslib.strategy DocumentAction() at: scripts.prepdocslib.strategy.Strategy run(self) at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.filestrategy def parse_file( + file: File, + file_processors: dict[str, FileProcessor], + category: Optional[str] = None, + image_embeddings: Optional[ImageEmbeddings] = None, - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None ) -> List[Section]: key = file.file_extension() processor = file_processors.get(key) if processor is None: + logger.info("Skipping '%s', no parser found.", file.filename()) - logger.info(f"Skipping '{file.filename()}', no parser found.") return [] + logger.info("Ingesting '%s'", file.filename()) - logger.info(f"Parsing '{file.filename()}'") pages = [page async for page in processor.parser.parse(content=file.content)] + logger.info("Splitting '%s' into sections", file.filename()) + if image_embeddings: + logger.warning("Each page will be split into smaller chunks of text, but images will be of the entire page.") - logger.info(f"Splitting '{file.filename()}' into sections") sections = [ Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) ] return sections ===========changed ref 1=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File) -> Optional[List[str]]: async with BlobServiceClient( account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() # Re-open and upload the original file with open(file.content.name, 
"rb") as reopened_file: blob_name = BlobManager.blob_name_from_file_name(file.content.name) + logger.info("Uploading blob for whole file -> %s", blob_name) - logger.info(f"\tUploading blob for whole file -> {blob_name}") await container_client.upload_blob(blob_name, reopened_file, overwrite=True) + if self.store_page_images: + if os.path.splitext(file.content.name)[1].lower() == ".pdf": - if self.store_page_images and os.path.splitext(file.content.name)[1].lower() == ".pdf": + return await self.upload_pdf_blob_images(service_client, container_client, file) - return await self.upload_pdf_blob_images(service_client, container_client, file) + else: + logger.info("File %s is not a PDF, skipping image upload", file.content.name) return None ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): + logger.info("Ensuring search index %s exists", self.search_info.index_name) - logger.info(f"Ensuring search index {self.search_info.index_name} exists") async with self.search_info.create_search_index_client() as search_index_client: fields = [ ( SimpleField(name="id", type="Edm.String", key=True) if not self.use_int_vectorization else SearchField( name="id", type="Edm.String", key=True, sortable=True, filterable=True, facetable=True, analyzer_name="keyword", ) ), SearchableField( name="content", type="Edm.String", analyzer_name=self.search_analyzer_name, ), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_profile_name="embedding_config", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField( name="sourcepage", type="Edm.String", filterable=True, facetable=True, ), SimpleField( name="sourcefile", type="Edm.String", filterable=True, facetable=True, ), ] if self.use_acls: fields.append( SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) </s>
app.backend.core.imageshelper/download_blob_as_base64
Modified
Azure-Samples~azure-search-openai-demo
d896376069d3350dc733638ae86e811beba53d9f
Don't disable text chunking when GPT4vision is enabled (#1355)
<1>:<add> image_filename = base_name + ".png" <add> try: <add> blob = await blob_container_client.get_blob_client(image_filename).download_blob() <del> blob = await blob_container_client.get_blob_client(base_name + ".png").download_blob() <2>:<add> if not blob.properties: <add> logging.warning(f"No blob exists for {image_filename}") <add> return None <add> img = base64.b64encode(await blob.readall()).decode("utf-8") <add> return f"data:image/png;base64,{img}" <add> except ResourceNotFoundError: <add> logging.warning(f"No blob exists for {image_filename}") <add> return None <3>:<del> if not blob.properties: <4>:<del> return None <5>:<del> img = base64.b64encode(await blob.readall()).decode("utf-8") <6>:<del> return f"data:image/png;base64,{img}" <7>:<del>
# module: app.backend.core.imageshelper def download_blob_as_base64(blob_container_client: ContainerClient, file_path: str) -> Optional[str]: <0> base_name, _ = os.path.splitext(file_path) <1> blob = await blob_container_client.get_blob_client(base_name + ".png").download_blob() <2> <3> if not blob.properties: <4> return None <5> img = base64.b64encode(await blob.readall()).decode("utf-8") <6> return f"data:image/png;base64,{img}" <7>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] ===========changed ref 0=========== + # module: tests.test_fetch_image + + ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class LocalPdfParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: + logger.info("Extracting text from '%s' using local PDF parser (pypdf)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local PDF parser (pypdf)") reader = PdfReader(content) pages = reader.pages offset = 0 for page_num, p in enumerate(pages): page_text = p.extract_text() yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) + logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 3=========== # module: scripts.prepdocslib.htmlparser class LocalHTMLParser(Parser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: """Parses the given content. To learn more, please visit https://pypi.org/project/beautifulsoup4/ Args: content (IO): The content to parse. Returns: Page: The parsed html Page. """ + logger.info("Extracting text from '%s' using local HTML parser (BeautifulSoup)", content.name) - logger.info(f"\tExtracting text from '{content.name}' using local HTML parser (BeautifulSoup)") data = content.read() soup = BeautifulSoup(data, "html.parser") # Get text only from html file result = soup.get_text() yield Page(0, 0, text=cleanup_data(result)) ===========changed ref 4=========== # module: scripts.prepdocslib.filestrategy def parse_file( + file: File, + file_processors: dict[str, FileProcessor], + category: Optional[str] = None, + image_embeddings: Optional[ImageEmbeddings] = None, - file: File, file_processors: dict[str, FileProcessor], category: Optional[str] = None ) -> List[Section]: key = file.file_extension() processor = file_processors.get(key) if processor is None: + logger.info("Skipping '%s', no parser found.", file.filename()) - logger.info(f"Skipping '{file.filename()}', no parser found.") return [] + logger.info("Ingesting '%s'", file.filename()) - logger.info(f"Parsing '{file.filename()}'") pages = [page async for page in processor.parser.parse(content=file.content)] + logger.info("Splitting '%s' into sections", file.filename()) + if image_embeddings: + logger.warning("Each page will be split into smaller chunks of text, but images will be of the entire page.") - logger.info(f"Splitting '{file.filename()}' into sections") sections = [ Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages) ] return sections ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch 
in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) + logger.info( + "Computed embeddings in batch. Batch size: %d, Token count: %d", + len(batch.texts), + batch.token_length, + ) - logger.info(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings ===========changed ref 6=========== # module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: # if filename ends in .md5 skip if path.endswith(".md5"): return True # if there is a file called .md5 in this directory, see if its updated stored_hash = None with open(path, "rb") as file: existing_hash = hashlib.md5(file.read()).hexdigest() hash_path = f"{path}.md5" if os.path.exists(hash_path): with open(hash_path, encoding="utf-8") as md5_f: stored_hash = md5_f.read() if stored_hash and stored_hash.strip() == existing_hash.strip(): + logger.info("Skipping %s, no changes detected.", path) - logger.info(f"Skipping {path}, no changes detected.") return True # Write the hash with open(hash_path, "w", encoding="utf-8") as md5_f: md5_f.write(existing_hash) return False ===========changed ref 7=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code if ( prefix is not None and ( not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) ) ) or (path is not None and blob_path == os.path.basename(path)): continue + logger.info("Removing blob %s", blob_path) - logger.info(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path)
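The same commit introduces tests/test_fetch_image (changed ref 0 above). A plausible pytest sketch for the new not-found branch is below; it assumes pytest-asyncio and that the module resolves as app.backend.core.imageshelper, neither of which is shown in this chunk.

from unittest import mock

import pytest
from azure.core.exceptions import ResourceNotFoundError

from app.backend.core.imageshelper import download_blob_as_base64  # assumed path


@pytest.mark.asyncio
async def test_download_blob_as_base64_missing_blob():
    blob_client = mock.MagicMock()
    blob_client.download_blob = mock.AsyncMock(side_effect=ResourceNotFoundError)
    container_client = mock.MagicMock()
    container_client.get_blob_client.return_value = blob_client

    # The helper should swallow the 404 and signal "no image" with None.
    assert await download_blob_as_base64(container_client, "page4.pdf") is None
    container_client.get_blob_client.assert_called_once_with("page4.png")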
scripts.prepdocslib.textsplitter/SentenceTextSplitter.split_page_by_max_tokens
Modified
Azure-Samples~azure-search-openai-demo
a45774a90d8557b90f89f06be7e888b8ee476bb8
Ensure there are no zero-length sections for batch API (#1423)
<30>:<add> middle = int(len(text) // 2) <add> overlap = int(len(text) * (DEFAULT_OVERLAP_PERCENT / 100)) <del> first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))] <31>:<add> first_half = text[: middle + overlap] <add> second_half = text[middle - overlap :] <del> second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :]
# module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]: <0> """ <1> Recursively splits page by maximum number of tokens to better handle languages with higher token/word ratios. <2> """ <3> tokens = bpe.encode(text) <4> if len(tokens) <= self.max_tokens_per_section: <5> # Section is already within max tokens, return <6> yield SplitPage(page_num=page_num, text=text) <7> else: <8> # Start from the center and try and find the closest sentence ending by spiralling outward. <9> # IF we get to the outer thirds, then just split in half with a 5% overlap <10> start = int(len(text) // 2) <11> pos = 0 <12> boundary = int(len(text) // 3) <13> split_position = -1 <14> while start - pos > boundary: <15> if text[start - pos] in self.sentence_endings: <16> split_position = start - pos <17> break <18> elif text[start + pos] in self.sentence_endings: <19> split_position = start + pos <20> break <21> else: <22> pos += 1 <23> <24> if split_position > 0: <25> first_half = text[: split_position + 1] <26> second_half = text[split_position + 1 :] <27> else: <28> # Split page in half and call function again <29> # Overlap first and second halves by DEFAULT_OVERLAP_PERCENT% <30> first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))] <31> second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :] <32> yield from self.split_page_by_max_tokens(page_num, first_half) <33> yield from self.</s>
===========below chunk 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]: # offset: 1 ===========unchanged ref 0=========== at: scripts.prepdocslib.page SplitPage(page_num: int, text: str) at: scripts.prepdocslib.textsplitter bpe = tiktoken.encoding_for_model(ENCODING_MODEL) DEFAULT_OVERLAP_PERCENT = 10 # See semantic search article for 10% overlap performance at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__ self.sentence_endings = STANDARD_SENTENCE_ENDINGS + CJK_SENTENCE_ENDINGS self.max_tokens_per_section = max_tokens_per_section at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] at: typing Generator = _alias(collections.abc.Generator, 3)
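Concrete numbers show why the fallback split had to change. For a 1,000-character page with DEFAULT_OVERLAP_PERCENT = 10, the old divisor arithmetic started the second slice past the end of the string, producing exactly the zero-length section the commit title refers to:

DEFAULT_OVERLAP_PERCENT = 10
text = "x" * 1000

# Old formulas: 1000 // 2.1 == 476 and 1000 // 0.9 == 1111, so the
# second slice begins past the end of the string and comes back empty.
old_first = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))]
old_second = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :]
print(len(old_first), len(old_second))  # 476 0

# New formulas: split at the middle and extend both halves by the overlap,
# so the two recursive calls share a 2 * overlap window of 200 characters.
middle = int(len(text) // 2)
overlap = int(len(text) * (DEFAULT_OVERLAP_PERCENT / 100))
print(len(text[: middle + overlap]), len(text[middle - overlap :]))  # 600 600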
tests.test_prepdocslib_textsplitter/test_sentencetextsplitter_multilang
Modified
Azure-Samples~azure-search-openai-demo
a45774a90d8557b90f89f06be7e888b8ee476bb8
Ensure there are no zero-length sections for batch API (#1423)
<22>:<add> assert section.split_page.text != ""
# module: tests.test_prepdocslib_textsplitter @pytest.mark.asyncio async def test_sentencetextsplitter_multilang(test_doc, tmp_path): <0> text_splitter = SentenceTextSplitter(has_image_embeddings=False) <1> bpe = tiktoken.encoding_for_model(ENCODING_MODEL) <2> pdf_parser = LocalPdfParser() <3> <4> shutil.copy(str(test_doc.absolute()), tmp_path) <5> <6> list_file_strategy = LocalListFileStrategy(path_pattern=str(tmp_path / "*")) <7> files = list_file_strategy.list() <8> processed = 0 <9> async for file in files: <10> pages = [page async for page in pdf_parser.parse(content=file.content)] <11> assert pages <12> sections = [ <13> Section(split_page, content=file, category="test category") <14> for split_page in text_splitter.split_pages(pages) <15> ] <16> assert sections <17> processed += 1 <18> <19> # Verify the size of the sections <20> token_lengths = [] <21> for section in sections: <22> assert len(section.split_page.text) <= (text_splitter.max_section_length * 1.2) <23> # Verify the number of tokens is below 500 <24> token_lengths.append((len(bpe.encode(section.split_page.text)), len(section.split_page.text))) <25> # verify that none of the numbers in token_lengths are above 500 <26> assert all([tok_len <= text_splitter.max_tokens_per_section for tok_len, _ in token_lengths]), ( <27> test_doc.name, <28> token_lengths, <29> ) <30> assert processed == 1 <31>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: scripts.prepdocslib.listfilestrategy LocalListFileStrategy(path_pattern: str) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy list() -> AsyncGenerator[File, None] at: scripts.prepdocslib.page.SplitPage.__init__ self.text = text at: scripts.prepdocslib.pdfparser LocalPdfParser() at: scripts.prepdocslib.pdfparser.LocalPdfParser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page at: scripts.prepdocslib.textsplitter ENCODING_MODEL = "text-embedding-ada-002" SentenceTextSplitter(has_image_embeddings: bool, max_tokens_per_section: int=500) at: scripts.prepdocslib.textsplitter.SentenceTextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] at: scripts.prepdocslib.textsplitter.SentenceTextSplitter.__init__ self.max_section_length = DEFAULT_SECTION_LENGTH self.max_tokens_per_section = max_tokens_per_section at: shutil copy(src: StrPath, dst: StrPath, *, follow_symlinks: bool=...) -> _PathReturn at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] ===========unchanged ref 1=========== at: tiktoken.model encoding_for_model(model_name: str) -> Encoding ===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]: """ Recursively splits page by maximum number of tokens to better handle languages with higher token/word ratios. """ tokens = bpe.encode(text) if len(tokens) <= self.max_tokens_per_section: # Section is already within max tokens, return yield SplitPage(page_num=page_num, text=text) else: # Start from the center and try and find the closest sentence ending by spiralling outward. 
# IF we get to the outer thirds, then just split in half with a 5% overlap start = int(len(text) // 2) pos = 0 boundary = int(len(text) // 3) split_position = -1 while start - pos > boundary: if text[start - pos] in self.sentence_endings: split_position = start - pos break elif text[start + pos] in self.sentence_endings: split_position = start + pos break else: pos += 1 if split_position > 0: first_half = text[: split_position + 1] second_half = text[split_position + 1 :] else: # Split page in half and call function again # Overlap first and second halves by DEFAULT_OVERLAP_PERCENT% + middle = int(len(text) // 2) + overlap = int(len(text) * (DEFAULT_OVERLAP_PERCENT / 100)) - first_half = text[: int(len(text) // (2.0 + (DEFAULT_OVERLAP_PERCENT / 100)))] + first_half = text[: middle + overlap] + second_half = text[middle - overlap :] - second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100</s> ===========changed ref 1=========== # module: scripts.prepdocslib.textsplitter class SentenceTextSplitter(TextSplitter): def split_page_by_max_tokens(self, page_num: int, text: str) -> Generator[SplitPage, None, None]: # offset: 1 <s> second_half = text[int(len(text) // (1.0 - (DEFAULT_OVERLAP_PERCENT / 100))) :] yield from self.split_page_by_max_tokens(page_num, first_half) yield from self.split_page_by_max_tokens(page_num, second_half)
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run
Modified
Azure-Samples~azure-search-openai-demo
ccf2494f3eadfae02126c2ec6b3be74d38e83618
Add minimum score criteria for AI search results (#1417)
<12>:<add> minimum_search_score = overrides.get("minimum_search_score", 0.0) <add> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
# module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> vector_fields = overrides.get("vector_fields", ["embedding"]) <6> <7> include_gtpV_text = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] <8> include_gtpV_images = overrides.get("gpt4v_input") in ["textAndImages", "images", None] <9> <10> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <11> top = overrides.get("top", 3) <12> filter = self.build_filter(overrides, auth_claims) <13> use_semantic_ranker = overrides.get("semantic_ranker") and has_text <14> <15> # If retrieval mode includes vectors, compute an embedding for the query <16> <17> vectors = [] <18> if has_vector: <19> for field in vector_fields: <20> vector = ( <21> await self.compute_text_embedding(q) <22> if field == "embedding" <23> else await self.compute_image_embedding(q) <24> ) <25> vectors.append(vector) <26> <27> # Only keep the text query if the retrieval mode uses text, otherwise drop it <28> query_text = q if has_text else</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] template = overrides.get("prompt_template", self.system_chat_template_gpt4v) model = self.gpt4v_model message_builder = MessageBuilder(template, model) # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if include_gtpV_text: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if include_gtpV_images: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) # Append user message message_builder.insert_message("user", user_content) updated_messages = message_builder.messages chat_completion = ( await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=1024, n=1, ) ).model_dump() data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", query_text, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.gpt4v_model, "deployment": self.gpt4v_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } chat_</s> ===========below chunk 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 3 <s>choices"][0]["context"] = extra_info chat_completion["choices"][0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach system_chat_template_gpt4v = ( "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. 
" + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned " + "If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts " ) at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__ self.blob_container_client = blob_container_client self.openai_client = openai_client self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
app.backend.approaches.approach/Approach.search
Modified
Azure-Samples~azure-search-openai-demo
ccf2494f3eadfae02126c2ec6b3be74d38e83618
Add minimum score criteria for AI search results (#1417)
# module: app.backend.approaches.approach class Approach(ABC): def search( self, top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_semantic_ranker: bool, use_semantic_captions: bool, + minimum_search_score: Optional[float], + minimum_reranker_score: Optional[float], ) -> List[Document]: <0> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <1> if use_semantic_ranker and query_text: <2> results = await self.search_client.search( <3> search_text=query_text, <4> filter=filter, <5> query_type=QueryType.SEMANTIC, <6> query_language=self.query_language, <7> query_speller=self.query_speller, <8> semantic_configuration_name="default", <9> top=top, <10> query_caption="extractive|highlight-false" if use_semantic_captions else None, <11> vector_queries=vectors, <12> ) <13> else: <14> results = await self.search_client.search( <15> search_text=query_text or "", filter=filter, top=top, vector_queries=vectors <16> ) <17> <18> documents = [] <19> async for page in results.by_page(): <20> async for document in page: <21> documents.append( <22> Document( <23> id=document.get("id"), <24> content=document.get("content"), <25> embedding=document.get("embedding"), <26> image_embedding=document.get("imageEmbedding"), <27> category=document.get("category"), <28> sourcepage=document.get("sourcepage"), <29> sourcefile=document.get("sourcefile"), <30> oids=document.get("oids"), <31> groups=document.get("groups"), <32> captions=cast(List[</s>
===========below chunk 0=========== <s>: app.backend.approaches.approach class Approach(ABC): def search( self, top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_semantic_ranker: bool, use_semantic_captions: bool, + minimum_search_score: Optional[float], + minimum_reranker_score: Optional[float], ) -> List[Document]: # offset: 1 score=document.get("@search.score"), reranker_score=document.get("@search.reranker_score"), ) ) return documents ===========unchanged ref 0=========== at: app.backend.approaches.approach Document(id: Optional[str], content: Optional[str], embedding: Optional[List[float]], image_embedding: Optional[List[float]], category: Optional[str], sourcepage: Optional[str], sourcefile: Optional[str], oids: Optional[List[str]], groups: Optional[List[str]], captions: List[QueryCaptionResult], score: Optional[float]=None, reranker_score: Optional[float]=None) at: app.backend.approaches.approach.Approach.__init__ self.search_client = search_client self.query_language = query_language self.query_speller = query_speller at: app.backend.approaches.approach.Document id: Optional[str] content: Optional[str] embedding: Optional[List[float]] image_embedding: Optional[List[float]] category: Optional[str] sourcepage: Optional[str] sourcefile: Optional[str] oids: Optional[List[str]] groups: Optional[List[str]] captions: List[QueryCaptionResult] score: Optional[float] = None reranker_score: Optional[float] = None at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: tests.test_chatapproach + def mock_search(*args, **kwargs): + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) + ===========changed ref 1=========== <s> (0, 0, 1), + (0, 2, 1), + (0.03, 0, 1), + (0.03, 2, 1), + (1, 0, 0), + (0, 4, 0), + (1, 4, 0), + ], + ) + async def test_search_results_filtering_by_scores( + monkeypatch, minimum_search_score, minimum_reranker_score, expected_result_count + ): + chat_approach = ChatReadRetrieveReadApproach( + search_client=SearchClient(endpoint="", index_name="", credential=AzureKeyCredential("")), + auth_helper=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + + monkeypatch.setattr(SearchClient, "search", mock_search) + + filtered_results = await chat_approach.search( + top=10, + query_text="test query", + filter=None, + vectors=[], + use_semantic_ranker=True, + use_semantic_captions=True, + minimum_search_score=minimum_search_score, + minimum_reranker_score=minimum_reranker_score, + ) + + assert ( + len(filtered_results) == expected_result_count + ), f"Expected {expected_result_count} results with minimum_search_score={minimum_search_score} and minimum_reranker_score={minimum_reranker_score}" + ===========changed ref 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: q = messages[-1]["content"] overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", 
{}) has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] vector_fields = overrides.get("vector_fields", ["embedding"]) include_gtpV_text = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] include_gtpV_images = overrides.get("gpt4v_input") in ["textAndImages", "images", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top", 3) + minimum_search_score = overrides.get("minimum_search_score", 0.0) + minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = self.build_filter(overrides, auth_claims) use_semantic_ranker = overrides.get("semantic_ranker") and has_text # If retrieval mode includes vectors, compute an embedding for the query vectors = [] if has_vector: for field in vector_fields: vector = ( await self.compute_text_embedding(q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors.append(vector) # Only keep the text</s>
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
ccf2494f3eadfae02126c2ec6b3be74d38e83618
Add minimum score criteria for AI search results (#1417)
<9>:<add> minimum_search_score = overrides.get("minimum_search_score", 0.0) <add> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <18>:<add> results = await self.search( <add> top, <add> query_text, <add> filter, <add> vectors, <add> use_semantic_ranker, <add> use_semantic_captions, <add> minimum_search_score, <add> minimum_reranker_score, <add> ) <del> results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_ranker = overrides.get("semantic_ranker") and has_text <6> <7> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <8> top = overrides.get("top", 3) <9> filter = self.build_filter(overrides, auth_claims) <10> # If retrieval mode includes vectors, compute an embedding for the query <11> vectors: list[VectorQuery] = [] <12> if has_vector: <13> vectors.append(await self.compute_text_embedding(q)) <14> <15> # Only keep the text query if the retrieval mode uses text, otherwise drop it <16> query_text = q if has_text else None <17> <18> results = await self.search(top, query_text, filter, vectors, use_semantic_ranker, use_semantic_captions) <19> <20> user_content = [q] <21> <22> template = overrides.get("prompt_template", self.system_chat_template) <23> model = self.chatgpt_model <24> message_builder = MessageBuilder(template, model) <25> <26> # Process results <27> sources_content = self.get_sources_content(results, use_semantic_captions, use_image_</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 # Append user message content = "\n".join(sources_content) user_content = q + "\n" + f"Sources:\n {content}" message_builder.insert_message("user", user_content) message_builder.insert_message("assistant", self.answer) message_builder.insert_message("user", self.question) updated_messages = message_builder.messages chat_completion = ( await self.openai_client.chat.completions.create( # Azure OpenAI takes the deployment name as the model name model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=1024, n=1, ) ).model_dump() data_points = {"text": sources_content} extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", query_text, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "has_vector": has_vector, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s>_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment} if self.chatgpt_deployment else {"model": self.chatgpt_model} ), ), ], } chat_completion["choices"][0]["context"] = extra_info chat_completion["choices"][0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document] get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str] compute_text_embedding(q: str) run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: approaches.approach.Document id: Optional[str] content: Optional[str] embedding: Optional[List[float]] image_embedding: Optional[List[float]] category: Optional[str] sourcepage: Optional[str] sourcefile: Optional[str] oids: Optional[List[str]] groups: Optional[List[str]] captions: List[QueryCaptionResult] score: Optional[float] = None reranker_score: Optional[float] = None serialize_for_results() -> dict[str, Any] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder insert_message(role: str, content: Union[str, List[ChatCompletionContentPartParam]], index: int=1) ===========unchanged ref 2=========== at: core.messagebuilder.MessageBuilder.__init__ self.messages: list[ChatCompletionMessageParam] = [ ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", system_content)) ] at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_chatapproach + def mock_search(*args, **kwargs): + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) +
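The change in #1417 amounts to a threshold pass over the returned search results. Below is a minimal runnable sketch of that filtering, assuming a trimmed-down Document with the optional score/reranker_score fields from the unchanged refs above; filter_by_scores is an illustrative name, not the repo's method.

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Document:
    content: str
    score: Optional[float] = None
    reranker_score: Optional[float] = None


def filter_by_scores(
    documents: List[Document],
    minimum_search_score: Optional[float] = None,
    minimum_reranker_score: Optional[float] = None,
) -> List[Document]:
    # A missing threshold behaves like 0, so unset overrides keep every result.
    return [
        doc
        for doc in documents
        if (doc.score or 0) >= (minimum_search_score or 0)
        and (doc.reranker_score or 0) >= (minimum_reranker_score or 0)
    ]


docs = [
    Document("a", score=0.02, reranker_score=1.5),
    Document("b", score=0.05, reranker_score=2.5),
]
# With thresholds (0.03, 2) only "b" survives -> 1 result, matching the
# (0.03, 2, 1) row of the parametrized test above.
print(len(filter_by_scores(docs, 0.03, 2)))  # 1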
tests.test_app_config/test_app_visionkey_notfound
Modified
Azure-Samples~azure-search-openai-demo
6aa14285543e923de55a1c639ac5b254aa1d69fd
Fix search env var (#1455)
<2>:<add> monkeypatch.setenv("AZURE_SEARCH_SECRET_NAME", "search-secret-name") <del> monkeypatch.setenv("SEARCH_SECRET_NAME", "search-secret-name")
# module: tests.test_app_config @pytest.mark.asyncio async def test_app_visionkey_notfound(monkeypatch, minimal_env): <0> monkeypatch.setenv("AZURE_KEY_VAULT_NAME", "my_key_vault") <1> monkeypatch.setenv("VISION_SECRET_NAME", "") <2> monkeypatch.setenv("SEARCH_SECRET_NAME", "search-secret-name") <3> <4> async def get_secret(*args, **kwargs): <5> if args[1] == "vision-secret-name": <6> raise Exception("Key not found") <7> return MockKeyVaultSecret("mysecret") <8> <9> monkeypatch.setattr(SecretClient, "get_secret", get_secret) <10> <11> quart_app = app.create_app() <12> async with quart_app.test_app() as test_app: <13> test_app.test_client() <14>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: tests.mocks MockKeyVaultSecret(value)
tests.test_app_config/test_app_searchkey_notfound
Modified
Azure-Samples~azure-search-openai-demo
6aa14285543e923de55a1c639ac5b254aa1d69fd
Fix search env var (#1455)
<2>:<add> monkeypatch.setenv("AZURE_SEARCH_SECRET_NAME", "") <del> monkeypatch.setenv("SEARCH_SECRET_NAME", "")
# module: tests.test_app_config @pytest.mark.asyncio async def test_app_searchkey_notfound(monkeypatch, minimal_env): <0> monkeypatch.setenv("AZURE_KEY_VAULT_NAME", "my_key_vault") <1> monkeypatch.setenv("VISION_SECRET_NAME", "vision-secret-name") <2> monkeypatch.setenv("SEARCH_SECRET_NAME", "") <3> <4> async def get_secret(*args, **kwargs): <5> if args[1] == "search-secret-name": <6> raise Exception("Key not found") <7> return MockKeyVaultSecret("mysecret") <8> <9> monkeypatch.setattr(SecretClient, "get_secret", get_secret) <10> <11> quart_app = app.create_app() <12> async with quart_app.test_app() as test_app: <13> test_app.test_client() <14>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: tests.mocks MockKeyVaultSecret(value) ===========changed ref 0=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_visionkey_notfound(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_KEY_VAULT_NAME", "my_key_vault") monkeypatch.setenv("VISION_SECRET_NAME", "") + monkeypatch.setenv("AZURE_SEARCH_SECRET_NAME", "search-secret-name") - monkeypatch.setenv("SEARCH_SECRET_NAME", "search-secret-name") async def get_secret(*args, **kwargs): if args[1] == "vision-secret-name": raise Exception("Key not found") return MockKeyVaultSecret("mysecret") monkeypatch.setattr(SecretClient, "get_secret", get_secret) quart_app = app.create_app() async with quart_app.test_app() as test_app: test_app.test_client()
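Both tests above rely on the same pattern: set the (now correctly named) AZURE_SEARCH_SECRET_NAME variable with monkeypatch.setenv, then stub SecretClient.get_secret so no real Key Vault is contacted. A self-contained sketch of that pattern, assuming pytest-asyncio and the async SecretClient from azure.keyvault.secrets.aio; the test name and the direct call at the end are illustrative only.

import pytest
from azure.keyvault.secrets.aio import SecretClient


class MockKeyVaultSecret:
    # Stand-in for tests.mocks.MockKeyVaultSecret(value)
    def __init__(self, value):
        self.value = value


@pytest.mark.asyncio
async def test_search_secret_is_fetched(monkeypatch):
    monkeypatch.setenv("AZURE_KEY_VAULT_NAME", "my_key_vault")
    # #1455: the backend reads AZURE_SEARCH_SECRET_NAME, not SEARCH_SECRET_NAME.
    monkeypatch.setenv("AZURE_SEARCH_SECRET_NAME", "search-secret-name")

    async def get_secret(*args, **kwargs):
        # args[0] is the SecretClient instance, args[1] the requested secret name
        assert args[1] == "search-secret-name"
        return MockKeyVaultSecret("mysecret")

    monkeypatch.setattr(SecretClient, "get_secret", get_secret)

    # Calling through the class shows the patched method in action.
    secret = await SecretClient.get_secret(None, "search-secret-name")
    assert secret.value == "mysecret"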
scripts.prepdocslib.embeddings/OpenAIEmbeddings.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<1>:<add> self.open_ai_dimensions = open_ai_dimensions
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): <0> self.open_ai_model_name = open_ai_model_name <1> self.disable_batch = disable_batch <2>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 2=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_batch
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<11>:<add> emb_response = await client.embeddings.create( <add> model=self.open_ai_model_name, input=batch.texts, **dimensions_args <add> ) <del> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts)
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: <0> batches = self.split_text_into_batches(texts) <1> embeddings = [] <2> client = await self.create_client() <3> for batch in batches: <4> async for attempt in AsyncRetrying( <5> retry=retry_if_exception_type(RateLimitError), <6> wait=wait_random_exponential(min=15, max=60), <7> stop=stop_after_attempt(15), <8> before_sleep=self.before_retry_sleep, <9> ): <10> with attempt: <11> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) <12> embeddings.extend([data.embedding for data in emb_response.data]) <13> logger.info( <14> "Computed embeddings in batch. Batch size: %d, Token count: %d", <15> len(batch.texts), <16> batch.token_length, <17> ) <18> <19> return embeddings <20>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
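The retry scaffolding around embeddings.create is tenacity's AsyncRetrying loop: back off randomly between 15 and 60 seconds on RateLimitError and give up after 15 attempts. A standalone sketch of the same pattern with the new **dimensions_args splat; embed_batch is an illustrative name.

from openai import AsyncOpenAI, RateLimitError
from tenacity import (
    AsyncRetrying,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)


async def embed_batch(
    client: AsyncOpenAI, model: str, texts: list[str], **dimensions_args
) -> list[list[float]]:
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(RateLimitError),
        wait=wait_random_exponential(min=15, max=60),
        stop=stop_after_attempt(15),
    ):
        with attempt:
            # dimensions_args is either {} or {"dimensions": N}; ada-002, which
            # rejects the parameter, is therefore never sent a dimensions kwarg.
            response = await client.embeddings.create(
                model=model, input=texts, **dimensions_args
            )
    return [item.embedding for item in response.data]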
scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_single
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<8>:<add> emb_response = await client.embeddings.create( <add> model=self.open_ai_model_name, input=text, **dimensions_args <add> ) <del> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text)
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: <0> client = await self.create_client() <1> async for attempt in AsyncRetrying( <2> retry=retry_if_exception_type(RateLimitError), <3> wait=wait_random_exponential(min=15, max=60), <4> stop=stop_after_attempt(15), <5> before_sleep=self.before_retry_sleep, <6> ): <7> with attempt: <8> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) <9> logger.info("Computed embedding for text section. Character count: %d", len(text)) <10> <11> return emb_response.data[0].embedding <12>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=batch.texts, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) logger.info( "Computed embeddings in batch. Batch size: %d, Token count: %d", len(batch.texts), batch.token_length, ) return embeddings ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embeddings
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<0>:<add> dimensions_args: ExtraArgs = ( <add> {"dimensions": self.open_ai_dimensions} <add> if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) <add> else {} <add> ) <add> <1>:<add> return await self.create_embedding_batch(texts, dimensions_args) <del> return await self.create_embedding_batch(texts) <3>:<add> return [await self.create_embedding_single(text, dimensions_args) for text in texts] <del> return [await self.create_embedding_single(text) for text in texts]
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: <0> if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: <1> return await self.create_embedding_batch(texts) <2> <3> return [await self.create_embedding_single(text) for text in texts] <4>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=batch.texts, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) logger.info( "Computed embeddings in batch. Batch size: %d, Token count: %d", len(batch.texts), batch.token_length, ) return embeddings ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 5=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
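create_embeddings now builds dimensions_args once and threads it through both code paths. The trick is the total=False TypedDict: it legally describes both an empty dict and {"dimensions": N}, so call sites can splat it unconditionally. A small sketch of that dispatch; build_dimensions_args is a hypothetical helper.

from typing import TypedDict


class ExtraArgs(TypedDict, total=False):
    dimensions: int


SUPPORTED_DIMENSIONS_MODEL = {
    "text-embedding-ada-002": False,
    "text-embedding-3-small": True,
    "text-embedding-3-large": True,
}


def build_dimensions_args(model_name: str, dimensions: int) -> ExtraArgs:
    # ada-002 has a fixed 1536-dim output and rejects the dimensions parameter,
    # so only the text-embedding-3 models receive the kwarg.
    if SUPPORTED_DIMENSIONS_MODEL.get(model_name):
        return {"dimensions": dimensions}
    return {}


print(build_dimensions_args("text-embedding-ada-002", 1536))   # {}
print(build_dimensions_args("text-embedding-3-large", 3072))   # {'dimensions': 3072}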
scripts.prepdocslib.embeddings/AzureOpenAIEmbeddingService.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<0>:<add> super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) <del> super().__init__(open_ai_model_name, disable_batch)
# module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): <0> super().__init__(open_ai_model_name, disable_batch) <1> self.open_ai_service = open_ai_service <2> self.open_ai_deployment = open_ai_deployment <3> self.credential = credential <4>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=batch.texts, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) logger.info( "Computed embeddings in batch. 
Batch size: %d, Token count: %d", len(batch.texts), batch.token_length, ) return embeddings ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 6=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
scripts.prepdocslib.embeddings/OpenAIEmbeddingService.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<0>:<add> super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) <del> super().__init__(open_ai_model_name, disable_batch)
<s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): <0> super().__init__(open_ai_model_name, disable_batch) <1> self.credential = credential <2> self.organization = organization <3>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. 
Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=batch.texts, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) logger.info( "Computed embeddings in batch. Batch size: %d, Token count: %d", len(batch.texts), batch.token_length, ) return embeddings ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 7=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
scripts.prepdocslib.searchmanager/SearchManager.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<5>:<add> # Integrated vectorization uses the ada-002 model with 1536 dimensions <add> self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536
# module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): <0> self.search_info = search_info <1> self.search_analyzer_name = search_analyzer_name <2> self.use_acls = use_acls <3> self.use_int_vectorization = use_int_vectorization <4> self.embeddings = embeddings <5> self.search_images = search_images <6>
===========unchanged ref 0=========== at: scripts.prepdocslib.embeddings OpenAIEmbeddings(open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool=False) at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + } ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await 
self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]: - def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=batch.texts, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) embeddings.extend([data.embedding for data in emb_response.data]) logger.info( "Computed embeddings in batch. Batch size: %d, Token count: %d", len(batch.texts), batch.token_length, ) return embeddings
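The one-liner added to SearchManager.__init__ is the whole contract between ingestion and the index schema: use the embedding service's configured width, or fall back to ada-002's 1536 when no service is wired up (the integrated-vectorization path). Isolated for clarity, with a stand-in class for OpenAIEmbeddings:

from typing import Optional


class Embeddings:
    # Stand-in for OpenAIEmbeddings with the new attribute
    def __init__(self, open_ai_dimensions: int):
        self.open_ai_dimensions = open_ai_dimensions


def index_dimensions(embeddings: Optional[Embeddings]) -> int:
    # Mirrors SearchManager.__init__: take the configured model's width,
    # else default to ada-002's 1536 used by integrated vectorization.
    return embeddings.open_ai_dimensions if embeddings else 1536


assert index_dimensions(Embeddings(3072)) == 3072
assert index_dimensions(None) == 1536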
scripts.prepdocslib.searchmanager/SearchManager.create_index
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<30>:<add> vector_search_dimensions=self.embedding_dimensions, <del> vector_search_dimensions=1536,
# module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): <0> logger.info("Ensuring search index %s exists", self.search_info.index_name) <1> <2> async with self.search_info.create_search_index_client() as search_index_client: <3> fields = [ <4> ( <5> SimpleField(name="id", type="Edm.String", key=True) <6> if not self.use_int_vectorization <7> else SearchField( <8> name="id", <9> type="Edm.String", <10> key=True, <11> sortable=True, <12> filterable=True, <13> facetable=True, <14> analyzer_name="keyword", <15> ) <16> ), <17> SearchableField( <18> name="content", <19> type="Edm.String", <20> analyzer_name=self.search_analyzer_name, <21> ), <22> SearchField( <23> name="embedding", <24> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <25> hidden=False, <26> searchable=True, <27> filterable=False, <28> sortable=False, <29> facetable=False, <30> vector_search_dimensions=1536, <31> vector_search_profile_name="embedding_config", <32> ), <33> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <34> SimpleField( <35> name="sourcepage", <36> type="Edm.String", <37> filterable=True, <38> facetable=True, <39> ), <40> SimpleField( <41> name="sourcefile", <42> type="Edm.String", <43> filterable=True, <44> facetable=True, <45> ), <46> ] <47> if self.use_acls: <48> fields.append( <49> SimpleField( <50> name="oids", <51> type=SearchField</s>
===========below chunk 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 1 filterable=True, ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True, ) ) if self.use_int_vectorization: fields.append(SearchableField(name="parent_id", type="Edm.String", filterable=True)) if self.search_images: fields.append( SearchField( name="imageEmbedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1024, vector_search_profile_name="embedding_config", ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_search=SemanticSearch( configurations=[ SemanticConfiguration( name="default", prioritized_fields=SemanticPrioritizedFields( title_field=None, content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswAlgorithmConfiguration( name="hnsw_config", parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm_configuration_name="hnsw_config", vectorizer=( f"{self.search_info.index_name}-vectorizer" if self.use_int_vectorization else None ), ), ], vectorizers=vectorizers, ), ) if self.search_info.index_</s> ===========below chunk 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None): # offset: 2 <s> ], vectorizers=vectorizers, ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: logger.info("Creating %s search index", self.search_info.index_name) await search_index_client.create_index(index) else: logger.info("Search index %s already exists", self.search_info.index_name) ===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scripts.prepdocslib.searchmanager logger = logging.getLogger("ingester") at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 at: scripts.prepdocslib.strategy.SearchInfo create_search_index_client() -> SearchIndexClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 
1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential
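Downstream, create_index forwards that value into the vector field definition, so an index built for text-embedding-3-large gets a 3072-wide embedding field instead of the previously hard-coded 1536. A sketch of just that field, using the same azure-search-documents models that appear in create_index above; the dimension value is an example.

from azure.search.documents.indexes.models import SearchField, SearchFieldDataType

embedding_dimensions = 3072  # e.g. from OpenAIEmbeddings.open_ai_dimensions

embedding_field = SearchField(
    name="embedding",
    type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
    hidden=False,
    searchable=True,
    filterable=False,
    sortable=False,
    facetable=False,
    vector_search_dimensions=embedding_dimensions,
    vector_search_profile_name="embedding_config",
)
print(embedding_field.name, embedding_field.vector_search_dimensions)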
tests.test_chatvisionapproach/chat_approach
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<18>:<add> embedding_model=MOCK_EMBEDDING_MODEL_NAME, <add> embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> embedding_model="text-",
# module: tests.test_chatvisionapproach @pytest.fixture def chat_approach(openai_client, mock_confidential_client_success): <0> return ChatReadRetrieveReadVisionApproach( <1> search_client=None, <2> openai_client=openai_client, <3> auth_helper=AuthenticationHelper( <4> search_index=MockSearchIndex, <5> use_authentication=True, <6> server_app_id="SERVER_APP", <7> server_app_secret="SERVER_SECRET", <8> client_app_id="CLIENT_APP", <9> tenant_id="TENANT_ID", <10> require_access_control=None, <11> ), <12> blob_container_client=None, <13> vision_endpoint="endpoint", <14> vision_token_provider=lambda: "token", <15> gpt4v_deployment="gpt-4v", <16> gpt4v_model="gpt-4v", <17> embedding_deployment="embeddings", <18> embedding_model="text-", <19> sourcepage_field="", <20> content_field="", <21> query_language="en-us", <22> query_speller="lexicon", <23> ) <24>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: tests.conftest mock_confidential_client_success(monkeypatch) at: tests.mocks MOCK_EMBEDDING_DIMENSIONS = 1536 MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" at: tests.test_chatvisionapproach MockSearchIndex = SearchIndex( name="test", fields=[ SearchField(name="oids", type="Collection(Edm.String)"), SearchField(name="groups", type="Collection(Edm.String)"), ], ) openai_client() ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization 
uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + }
scripts.prepdocs/setup_embeddings_service
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<12>:<add> open_ai_dimensions=openai_dimensions, <20>:<add> open_ai_dimensions=openai_dimensions,
# module: scripts.prepdocs def setup_embeddings_service( azure_credential: AsyncTokenCredential, openai_host: str, openai_model_name: str, openai_service: str, openai_deployment: str, + openai_dimensions: int, openai_key: Union[str, None], openai_org: Union[str, None], disable_vectors: bool = False, disable_batch_vectors: bool = False, ): <0> if disable_vectors: <1> logger.info("Not setting up embeddings service") <2> return None <3> <4> if openai_host != "openai": <5> azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = ( <6> azure_credential if openai_key is None else AzureKeyCredential(openai_key) <7> ) <8> return AzureOpenAIEmbeddingService( <9> open_ai_service=openai_service, <10> open_ai_deployment=openai_deployment, <11> open_ai_model_name=openai_model_name, <12> credential=azure_open_ai_credential, <13> disable_batch=disable_batch_vectors, <14> ) <15> else: <16> if openai_key is None: <17> raise ValueError("OpenAI key is required when using the non-Azure OpenAI API") <18> return OpenAIEmbeddingService( <19> open_ai_model_name=openai_model_name, <20> credential=openai_key, <21> organization=openai_org, <22> disable_batch=disable_batch_vectors, <23> ) <24>
===========unchanged ref 0=========== at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) OpenAIEmbeddingService(open_ai_model_name: str, open_ai_dimensions: int, credential: str, organization: Optional[str]=None, disable_batch: bool=False) at: scripts.prepdocs logger = logging.getLogger("ingester") ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} 
+ if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + }
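A hedged sketch of calling the updated setup_embeddings_service factory above; the service and deployment names are placeholders, and 1024 is just one of the reduced sizes the text-embedding-3-* models accept:

from azure.identity.aio import DefaultAzureCredential

azure_credential = DefaultAzureCredential()

# openai_host != "openai" selects the AzureOpenAIEmbeddingService branch
embeddings = setup_embeddings_service(
    azure_credential=azure_credential,
    openai_host="azure",
    openai_model_name="text-embedding-3-large",
    openai_service="myservice",      # placeholder
    openai_deployment="embedding",   # placeholder
    openai_dimensions=1024,          # honored only by the text-embedding-3-* models
    openai_key=None,                 # None -> authenticate with azure_credential
    openai_org=None,
)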
tests.test_prepdocs/test_compute_embedding_success
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<26>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-ada-003", <42>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-ada-003",
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): <0> async def mock_create_client(*args, **kwargs): <1> # From https://platform.openai.com/docs/api-reference/embeddings/create <2> return MockClient( <3> embeddings_client=MockEmbeddingsClient( <4> create_embedding_response=openai.types.CreateEmbeddingResponse( <5> object="list", <6> data=[ <7> openai.types.Embedding( <8> embedding=[ <9> 0.0023064255, <10> -0.009327292, <11> -0.0028842222, <12> ], <13> index=0, <14> object="embedding", <15> ) <16> ], <17> model="text-embedding-ada-002", <18> usage=Usage(prompt_tokens=8, total_tokens=8), <19> ) <20> ) <21> ) <22> <23> embeddings = AzureOpenAIEmbeddingService( <24> open_ai_service="x", <25> open_ai_deployment="x", <26> open_ai_model_name="text-ada-003", <27> credential=MockAzureCredential(), <28> disable_batch=False, <29> ) <30> monkeypatch.setattr(embeddings, "create_client", mock_create_client) <31> assert await embeddings.create_embeddings(texts=["foo"]) == [ <32> [ <33> 0.0023064255, <34> -0.009327292, <35> -0.0028842222, <36> ] <37> ] <38> <39> embeddings = AzureOpenAIEmbeddingService( <40> open_ai_service="x", <41> open_ai_deployment="x", <42> open_ai_model_name="text-ada-003", <43> credential=MockAzureCredential(), <44> disable_batch=True, <45> ) <46> monkeypatch.setattr(embeddings, "create_client",</s>
===========below chunk 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 
self.search_images = search_images ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + }
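The test above verifies the returned vector; a complementary check is that open_ai_dimensions actually reaches the API call. A sketch of such a test (not from the repo; MockAzureCredential is the helper from tests.mocks):

import pytest

class RecordingEmbeddings:
    def __init__(self):
        self.calls = []

    async def create(self, **kwargs):
        self.calls.append(kwargs)  # capture model/input/dimensions
        raise RuntimeError("stop after recording")

@pytest.mark.asyncio
async def test_dimensions_forwarded(monkeypatch):
    recorder = RecordingEmbeddings()

    class MockClient:
        embeddings = recorder

    service = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name="text-embedding-3-small",
        open_ai_dimensions=256,
        credential=MockAzureCredential(),
        disable_batch=True,
    )

    async def mock_create_client(*args, **kwargs):
        return MockClient()

    monkeypatch.setattr(service, "create_client", mock_create_client)
    # RuntimeError is not a RateLimitError, so tenacity re-raises it immediately
    with pytest.raises(RuntimeError):
        await service.create_embeddings(texts=["foo"])
    assert recorder.calls[0]["dimensions"] == 256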
tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<6>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-embedding-ada-002",
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): <0> with caplog.at_level(logging.INFO): <1> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <2> with pytest.raises(tenacity.RetryError): <3> embeddings = AzureOpenAIEmbeddingService( <4> open_ai_service="x", <5> open_ai_deployment="x", <6> open_ai_model_name="text-embedding-ada-002", <7> credential=MockAzureCredential(), <8> disable_batch=False, <9> ) <10> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11> await embeddings.create_embeddings(texts=["foo"]) <12> assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 <13>
===========changed ref 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): async def mock_create_client(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return MockClient( embeddings_client=MockEmbeddingsClient( create_embedding_response=openai.types.CreateEmbeddingResponse( object="list", data=[ openai.types.Embedding( embedding=[ 0.0023064255, -0.009327292, -0.0028842222, ], index=0, object="embedding", ) ], model="text-embedding-ada-002", usage=Usage(prompt_tokens=8, total_tokens=8), ) ) ) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_</s> ===========changed ref 1=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 <s> + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=False, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=True, - open_ai_</s> ===========changed ref 2=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 2 <s>name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 5=========== <s> scripts.prepdocslib.embeddings class 
OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 6=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 8=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images
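The assertion count of 14 follows from the retry policy in the embeddings code: stop_after_attempt(15) permits 15 attempts, and before_retry_sleep logs once before each of the 14 sleeps between them. A standalone sketch with the same tenacity shape (wait forced to zero so it runs instantly; the exception class is a stand-in for openai.RateLimitError):

import asyncio

from tenacity import AsyncRetrying, RetryError, retry_if_exception_type, stop_after_attempt, wait_random_exponential

class RateLimitError(Exception):
    pass  # stand-in for openai.RateLimitError

async def main():
    logged = 0

    def before_retry_sleep(retry_state):
        nonlocal logged
        logged += 1  # the real code logs "Rate limited on the OpenAI embeddings API" here

    try:
        async for attempt in AsyncRetrying(
            retry=retry_if_exception_type(RateLimitError),
            wait=wait_random_exponential(min=0, max=0),  # zero wait, demo only
            stop=stop_after_attempt(15),
            before_sleep=before_retry_sleep,
        ):
            with attempt:
                raise RateLimitError()
    except RetryError:
        pass
    assert logged == 14  # one log line per sleep between the 15 attempts

asyncio.run(main())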
tests.test_prepdocs/test_compute_embedding_ratelimiterror_single
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<6>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-embedding-ada-002",
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog): <0> with caplog.at_level(logging.INFO): <1> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <2> with pytest.raises(tenacity.RetryError): <3> embeddings = AzureOpenAIEmbeddingService( <4> open_ai_service="x", <5> open_ai_deployment="x", <6> open_ai_model_name="text-embedding-ada-002", <7> credential=MockAzureCredential(), <8> disable_batch=True, <9> ) <10> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) <11> await embeddings.create_embeddings(texts=["foo"]) <12> assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 <13>
===========changed ref 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): with caplog.at_level(logging.INFO): monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=False, ) monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): async def mock_create_client(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return MockClient( embeddings_client=MockEmbeddingsClient( create_embedding_response=openai.types.CreateEmbeddingResponse( object="list", data=[ openai.types.Embedding( embedding=[ 0.0023064255, -0.009327292, -0.0028842222, ], index=0, object="embedding", ) ], model="text-embedding-ada-002", usage=Usage(prompt_tokens=8, total_tokens=8), ) ) ) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_</s> ===========changed ref 3=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 <s> + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=False, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=True, - open_ai_</s> ===========changed ref 4=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def 
test_compute_embedding_success(monkeypatch): # offset: 2 <s>name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 6=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 7=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
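Both rate-limit tests rely on the monkeypatch of tenacity.wait_random_exponential.__call__ to make those 14 retries instantaneous; the lambda's two parameters correspond to self and the retry state. The same trick in isolation (a sketch, not repo code):

import pytest
import tenacity

def test_retries_run_instantly(monkeypatch):
    # Replace the wait strategy's __call__ so every computed wait is 0 seconds
    monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda self, retry_state: 0)

    @tenacity.retry(wait=tenacity.wait_random_exponential(min=15, max=60), stop=tenacity.stop_after_attempt(3))
    def flaky():
        raise ValueError("always fails")

    with pytest.raises(tenacity.RetryError):
        flaky()  # completes immediately instead of sleeping 15-60s between attempts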
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<5>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-embedding-ada-002", <16>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> open_ai_model_name="text-embedding-ada-002",
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_autherror(monkeypatch, capsys): <0> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <1> with pytest.raises(openai.AuthenticationError): <2> embeddings = AzureOpenAIEmbeddingService( <3> open_ai_service="x", <4> open_ai_deployment="x", <5> open_ai_model_name="text-embedding-ada-002", <6> credential=MockAzureCredential(), <7> disable_batch=False, <8> ) <9> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client) <10> await embeddings.create_embeddings(texts=["foo"]) <11> <12> with pytest.raises(openai.AuthenticationError): <13> embeddings = AzureOpenAIEmbeddingService( <14> open_ai_service="x", <15> open_ai_deployment="x", <16> open_ai_model_name="text-embedding-ada-002", <17> credential=MockAzureCredential(), <18> disable_batch=True, <19> ) <20> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client) <21> await embeddings.create_embeddings(texts=["foo"]) <22>
===========changed ref 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog): with caplog.at_level(logging.INFO): monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=True, ) monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 1=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog): with caplog.at_level(logging.INFO): monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=False, ) monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 4=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): async def mock_create_client(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return MockClient( embeddings_client=MockEmbeddingsClient( create_embedding_response=openai.types.CreateEmbeddingResponse( object="list", data=[ openai.types.Embedding( embedding=[ 0.0023064255, -0.009327292, -0.0028842222, ], index=0, object="embedding", ) ], model="text-embedding-ada-002", usage=Usage(prompt_tokens=8, total_tokens=8), ) ) ) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_</s> ===========changed ref 5=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def 
test_compute_embedding_success(monkeypatch): # offset: 1 <s> + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=False, - open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=MockAzureCredential(), + organization="org", + disable_batch=True, - open_ai_</s> ===========changed ref 6=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 2 <s>name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ]
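This test also documents why no RetryError is expected here: openai.AuthenticationError does not match retry_if_exception_type(RateLimitError), so tenacity re-raises it on the first attempt. The behavior in isolation (stand-in exception classes, not the openai types):

import asyncio

from tenacity import AsyncRetrying, RetryError, retry_if_exception_type, stop_after_attempt

class RateLimitError(Exception):
    pass

class AuthenticationError(Exception):
    pass

async def call_with_retry(exc: Exception) -> None:
    async for attempt in AsyncRetrying(retry=retry_if_exception_type(RateLimitError), stop=stop_after_attempt(3)):
        with attempt:
            raise exc

async def main():
    try:
        await call_with_retry(RateLimitError())
    except RetryError:
        print("RateLimitError was retried until stop_after_attempt(3)")
    try:
        await call_with_retry(AuthenticationError())
    except AuthenticationError:
        print("AuthenticationError propagated on the first attempt, no retries")

asyncio.run(main())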
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<6>:<add> self.embedding_dimensions = embedding_dimensions
<s>[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): <0> self.search_client = search_client <1> self.blob_container_client = blob_container_client <2> self.openai_client = openai_client <3> self.auth_helper = auth_helper <4> self.embedding_model = embedding_model <5> self.embedding_deployment = embedding_deployment <6> self.sourcepage_field = sourcepage_field <7> self.content_field = content_field <8> self.gpt4v_deployment = gpt4v_deployment <9> self.gpt4v_model = gpt4v_model <10> self.query_language = query_language <11> self.query_speller = query_speller <12> self.vision_endpoint = vision_endpoint <13> self.vision_token_provider = vision_token_provider <14>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False) at: typing Awaitable = _alias(collections.abc.Awaitable, 1) Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class 
OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]: - def create_embedding_single(self, text: str) -> List[float]: client = await self.create_client() async for attempt in AsyncRetrying( retry=retry_if_exception_type(RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: + emb_response = await client.embeddings.create( + model=self.open_ai_model_name, input=text, **dimensions_args + ) - emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) logger.info("Computed embedding for text section. Character count: %d", len(text)) return emb_response.data[0].embedding ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): """ Contains common logic across both OpenAI and Azure OpenAI embedding services Can split source text into batches for more efficient embedding calls """ + SUPPORTED_BATCH_AOAI_MODEL = { + "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}, - SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} + "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16}, + "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16}, + } + SUPPORTED_DIMENSIONS_MODEL = { + "text-embedding-ada-002": False, + "text-embedding-3-small": True, + "text-embedding-3-large": True, + }
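The new embedding_dimensions attribute stored here has to come from configuration somewhere upstream; a hedged sketch of deriving it (the env-var name and fallback are assumptions for illustration, not part of this diff):

import os

# Assumed variable name; 1536 matches ada-002 and the default size of text-embedding-3-small
embedding_dimensions = int(os.getenv("AZURE_OPENAI_EMB_DIMENSIONS", "1536"))

# Native output sizes, useful for sanity-checking a configured override:
NATIVE_DIMENSIONS = {
    "text-embedding-ada-002": 1536,
    "text-embedding-3-small": 1536,
    "text-embedding-3-large": 3072,
}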
tests.test_chatapproach/chat_approach
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<7>:<add> embedding_model=MOCK_EMBEDDING_MODEL_NAME, <add> embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> embedding_model="text-",
# module: tests.test_chatapproach @pytest.fixture def chat_approach(): <0> return ChatReadRetrieveReadApproach( <1> search_client=None, <2> auth_helper=None, <3> openai_client=None, <4> chatgpt_model="gpt-35-turbo", <5> chatgpt_deployment="chat", <6> embedding_deployment="embeddings", <7> embedding_model="text-", <8> sourcepage_field="", <9> content_field="", <10> query_language="en-us", <11> query_speller="lexicon", <12> ) <13>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: tests.mocks MOCK_EMBEDDING_DIMENSIONS = 1536 MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" MockAsyncSearchResultsIterator(search_text, vector_queries: Optional[list[VectorQuery]]) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = 
self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 7=========== <s>[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment + self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider
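Replacing the ad-hoc "text-" string with the shared tests.mocks constants keeps every fixture aligned with a real model name. If coverage of all three supported models were wanted, the fixture could be parametrized instead, e.g. (a sketch, not in the repo):

import pytest

EMBEDDING_CASES = [
    ("text-embedding-ada-002", 1536),
    ("text-embedding-3-small", 1536),
    ("text-embedding-3-large", 3072),
]

@pytest.fixture(params=EMBEDDING_CASES, ids=lambda case: case[0])
def embedding_case(request):
    model_name, dimensions = request.param  # each dependent test runs once per model
    return model_name, dimensions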
tests.test_chatapproach/test_search_results_filtering_by_scores
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<7>:<add> embedding_model=MOCK_EMBEDDING_MODEL_NAME, <add> embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, <del> embedding_model="text-",
<s>expected_result_count", [ (0, 0, 1), (0, 2, 1), (0.03, 0, 1), (0.03, 2, 1), (1, 0, 0), (0, 4, 0), (1, 4, 0), ], ) async def test_search_results_filtering_by_scores( monkeypatch, minimum_search_score, minimum_reranker_score, expected_result_count ): <0> chat_approach = ChatReadRetrieveReadApproach( <1> search_client=SearchClient(endpoint="", index_name="", credential=AzureKeyCredential("")), <2> auth_helper=None, <3> openai_client=None, <4> chatgpt_model="gpt-35-turbo", <5> chatgpt_deployment="chat", <6> embedding_deployment="embeddings", <7> embedding_model="text-", <8> sourcepage_field="", <9> content_field="", <10> query_language="en-us", <11> query_speller="lexicon", <12> ) <13> <14> monkeypatch.setattr(SearchClient, "search", mock_search) <15> <16> filtered_results = await chat_approach.search( <17> top=10, <18> query_text="test query", <19> filter=None, <20> vectors=[], <21> use_semantic_ranker=True, <22> use_semantic_captions=True, <23> minimum_search_score=minimum_search_score, <24> minimum_reranker_score=minimum_reranker_score, <25> ) <26> <27> assert ( <28> len(filtered_results) == expected_result_count <29> ), f"Expected {expected_result_count} results with minimum_search_score={minimum_search_score} and minimum_reranker_score={minimum_reranker_score}" <30>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: tests.mocks MOCK_EMBEDDING_DIMENSIONS = 1536 MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" at: tests.test_chatapproach mock_search(*args, **kwargs) at: tests.test_chatapproach.test_get_messages_from_history_few_shots user_query_request = "What does a Product manager do?" messages = chat_approach.get_messages_from_history( system_prompt=chat_approach.query_prompt_template, model_id=chat_approach.chatgpt_model, user_content=user_query_request, history=[], max_tokens=chat_approach.chatgpt_token_limit - len(user_query_request), few_shots=chat_approach.query_prompt_few_shots, ) ===========changed ref 0=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 6=========== # module: scripts.prepdocslib.searchmanager 
class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 8=========== <s>[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment + self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider
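The parametrized cases pin down the filtering contract: both thresholds are inclusive lower bounds applied independently, so the single mocked result must have a search score in [0.03, 1) and a reranker score in [2, 4). A sketch of the predicate the search method presumably applies (the concrete scores below are illustrative):

from typing import List

def filter_results(results: List[dict], minimum_search_score: float, minimum_reranker_score: float) -> List[dict]:
    return [
        r
        for r in results
        if (r.get("@search.score") or 0) >= minimum_search_score
        and (r.get("@search.reranker_score") or 0) >= minimum_reranker_score
    ]

mock_results = [{"@search.score": 0.04, "@search.reranker_score": 2.5}]  # illustrative values
assert len(filter_results(mock_results, 0.03, 2)) == 1  # passes both thresholds
assert len(filter_results(mock_results, 1, 0)) == 0     # search score too low
assert len(filter_results(mock_results, 0, 4)) == 0     # reranker score too low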
app.backend.approaches.approach/Approach.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<7>:<add> self.embedding_dimensions = embedding_dimensions
<s>ai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], ): <0> self.search_client = search_client <1> self.openai_client = openai_client <2> self.auth_helper = auth_helper <3> self.query_language = query_language <4> self.query_speller = query_speller <5> self.embedding_deployment = embedding_deployment <6> self.embedding_model = embedding_model <7> self.openai_host = openai_host <8> self.vision_endpoint = vision_endpoint <9> self.vision_token_provider = vision_token_provider <10>
===========unchanged ref 0=========== at: abc ABC() at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False) at: dataclasses dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]] dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]] dataclass(_cls: Type[_T]) -> Type[_T] at: typing Awaitable = _alias(collections.abc.Awaitable, 1) Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, 
chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 8=========== <s>[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment + self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider
app.backend.approaches.approach/Approach.compute_text_embedding
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<0>:<add> SUPPORTED_DIMENSIONS_MODEL = {
    <add>     "text-embedding-ada-002": False,
    <add>     "text-embedding-3-small": True,
    <add>     "text-embedding-3-large": True,
    <add> }
    <add>
    <add> class ExtraArgs(TypedDict, total=False):
    <add>     dimensions: int
    <add>
    <add> dimensions_args: ExtraArgs = (
    <add>     {"dimensions": self.embedding_dimensions} if SUPPORTED_DIMENSIONS_MODEL[self.embedding_model] else {}
    <add> )
<4>:<add> **dimensions_args,
# module: app.backend.approaches.approach
class Approach(ABC):
    async def compute_text_embedding(self, q: str):
<0>     embedding = await self.openai_client.embeddings.create(
<1>         # Azure OpenAI takes the deployment name as the model name
<2>         model=self.embedding_deployment if self.embedding_deployment else self.embedding_model,
<3>         input=q,
<4>     )
<5>     query_vector = embedding.data[0].embedding
<6>     return VectorizedQuery(vector=query_vector, k_nearest_neighbors=50, fields="embedding")
<7>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] ===========changed ref 0=========== <s>ai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], ): self.search_client = search_client self.openai_client = openai_client self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.openai_host = openai_host self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 6=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = 
self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 7=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts] ===========changed ref 9=========== <s>[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment + self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider
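Putting the pieces of this record together, a hedged sketch of how the patched embedding call can be driven end to end; `embed_query` is a hypothetical free function standing in for `compute_text_embedding`, and the name-prefix check is an assumption standing in for the `SUPPORTED_DIMENSIONS_MODEL` lookup:

```python
from openai import AsyncOpenAI
from azure.search.documents.models import VectorizedQuery

async def embed_query(client: AsyncOpenAI, model: str, dimensions: int, q: str) -> VectorizedQuery:
    # Only the text-embedding-3 family accepts an explicit `dimensions` argument.
    extra = {"dimensions": dimensions} if model.startswith("text-embedding-3") else {}
    embedding = await client.embeddings.create(model=model, input=q, **extra)
    return VectorizedQuery(
        vector=embedding.data[0].embedding,
        k_nearest_neighbors=50,  # same K the approach above uses
        fields="embedding",
    )
```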
tests.test_searchmanager/test_update_content_with_embeddings
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<32>:<add> open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
     <add> open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
     <del> open_ai_model_name="text-ada-003",
# module: tests.test_searchmanager
@pytest.mark.asyncio
async def test_update_content_with_embeddings(monkeypatch, search_info):
<0>     async def mock_create_client(*args, **kwargs):
<1>         # From https://platform.openai.com/docs/api-reference/embeddings/create
<2>         return MockClient(
<3>             embeddings_client=MockEmbeddingsClient(
<4>                 create_embedding_response=openai.types.CreateEmbeddingResponse(
<5>                     object="list",
<6>                     data=[
<7>                         openai.types.Embedding(
<8>                             embedding=[
<9>                                 0.0023064255,
<10>                                -0.009327292,
<11>                                -0.0028842222,
<12>                            ],
<13>                            index=0,
<14>                            object="embedding",
<15>                        )
<16>                    ],
<17>                    model="text-embedding-ada-002",
<18>                    usage=Usage(prompt_tokens=8, total_tokens=8),
<19>                )
<20>            )
<21>        )
<22>
<23>     documents_uploaded = []
<24>
<25>     async def mock_upload_documents(self, documents):
<26>         documents_uploaded.extend(documents)
<27>
<28>     monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents)
<29>     embeddings = AzureOpenAIEmbeddingService(
<30>         open_ai_service="x",
<31>         open_ai_deployment="x",
<32>         open_ai_model_name="text-ada-003",
<33>         credential=AzureKeyCredential("test"),
<34>         disable_batch=True,
<35>     )
<36>     monkeypatch.setattr(embeddings, "create_client", mock_create_client)
<37>     manager = SearchManager(
<38>         search_info,
<39>         embeddings=embeddings,
<40>     )
<41>
<42>     test_io = io.BytesIO(b"test content")
<43>     test_io.name = "test/foo.pdf"
<44>     file = File(test_io)
<45>
<46>     await manager.update_content(
<47>         [ </s>
===========below chunk 0=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_update_content_with_embeddings(monkeypatch, search_info): # offset: 1 split_page=SplitPage( page_num=0, text="test content", ), content=file, category="test", ) ] ) assert len(documents_uploaded) == 1, "It should have uploaded one document" assert documents_uploaded[0]["embedding"] == [ 0.0023064255, -0.009327292, -0.0028842222, ] ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: io BytesIO(initial_bytes: bytes=...) at: io.BytesIO name: Any at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False) at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.page SplitPage(page_num: int, text: str) at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, use_int_vectorization: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False) at: scripts.prepdocslib.searchmanager.SearchManager update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None) at: tests.test_searchmanager MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse) MockClient(embeddings_client) ===========changed ref 0=========== # module: tests.test_searchmanager + @pytest.fixture + def embeddings_service(monkeypatch): + async def mock_create_client(*args, **kwargs): + # From https://platform.openai.com/docs/api-reference/embeddings/create + return MockClient( + embeddings_client=MockEmbeddingsClient( + create_embedding_response=openai.types.CreateEmbeddingResponse( + object="list", + data=[ + openai.types.Embedding( + embedding=[ + 0.0023064255, + -0.009327292, + -0.0028842222, + ], + index=0, + object="embedding", + ) + ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), + ) + ) + ) + + embeddings = AzureOpenAIEmbeddingService( + open_ai_service="x", + open_ai_deployment="x", + open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME, + open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS, + credential=AzureKeyCredential("test"), + disable_batch=True, + ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) + return embeddings + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 3=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, 
open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 4=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 6=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images
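The mock response the fixture builds by hand has to satisfy the `openai` pydantic types exactly. A small sketch of that minimal shape; `make_mock_embedding_response` is a hypothetical helper, not part of tests.mocks:

```python
import openai.types
from openai.types.create_embedding_response import Usage

def make_mock_embedding_response(vector: list[float]) -> openai.types.CreateEmbeddingResponse:
    # Shape follows https://platform.openai.com/docs/api-reference/embeddings/create
    return openai.types.CreateEmbeddingResponse(
        object="list",
        data=[openai.types.Embedding(embedding=vector, index=0, object="embedding")],
        model="text-embedding-ada-002",
        usage=Usage(prompt_tokens=8, total_tokens=8),
    )

response = make_mock_embedding_response([0.0023064255, -0.009327292, -0.0028842222])
assert response.data[0].embedding[0] == 0.0023064255
```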
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<7>:<add> self.embedding_dimensions = embedding_dimensions
<s>ai_client: AsyncOpenAI,
    chatgpt_model: str,
    chatgpt_deployment: Optional[str],  # Not needed for non-Azure OpenAI
    embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
    embedding_model: str,
+   embedding_dimensions: int,
    sourcepage_field: str,
    content_field: str,
    query_language: str,
    query_speller: str,
):
<0>     self.search_client = search_client
<1>     self.openai_client = openai_client
<2>     self.auth_helper = auth_helper
<3>     self.chatgpt_model = chatgpt_model
<4>     self.chatgpt_deployment = chatgpt_deployment
<5>     self.embedding_deployment = embedding_deployment
<6>     self.embedding_model = embedding_model
<7>     self.sourcepage_field = sourcepage_field
<8>     self.content_field = content_field
<9>     self.query_language = query_language
<10>    self.query_speller = query_speller
<11>    self.chatgpt_token_limit = get_token_limit(chatgpt_model)
<12>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== <s>ai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for 
non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], ): self.search_client = search_client self.openai_client = openai_client self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.openai_host = openai_host self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider ===========changed ref 7=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embeddings(self, texts: List[str]) -> List[List[float]]: + dimensions_args: ExtraArgs = ( + {"dimensions": self.open_ai_dimensions} + if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name) + else {} + ) + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts, dimensions_args) - return await self.create_embedding_batch(texts) + return [await self.create_embedding_single(text, dimensions_args) for text in texts] - return [await self.create_embedding_single(text) for text in texts]
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<6>:<add> self.embedding_dimensions = embedding_dimensions
<s>ai_client: AsyncOpenAI,
    chatgpt_model: str,
    chatgpt_deployment: Optional[str],  # Not needed for non-Azure OpenAI
    embedding_model: str,
    embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
+   embedding_dimensions: int,
    sourcepage_field: str,
    content_field: str,
    query_language: str,
    query_speller: str,
):
<0>     self.search_client = search_client
<1>     self.chatgpt_deployment = chatgpt_deployment
<2>     self.openai_client = openai_client
<3>     self.auth_helper = auth_helper
<4>     self.chatgpt_model = chatgpt_model
<5>     self.embedding_model = embedding_model
<6>     self.chatgpt_deployment = chatgpt_deployment
<7>     self.embedding_deployment = embedding_deployment
<8>     self.sourcepage_field = sourcepage_field
<9>     self.content_field = content_field
<10>    self.query_language = query_language
<11>    self.query_speller = query_speller
<12>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== <s>ai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for 
non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], ): self.search_client = search_client self.openai_client = openai_client self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.openai_host = openai_host self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider ===========changed ref 7=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 8=========== <s>ai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.openai_client = openai_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model)
app.backend.approaches.chatreadretrievereadvision/ChatReadRetrieveReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
7a7881e2269d4a8bdf820eb6b33f723019d07a56
Add support for using new ada models with different dimensions (#1378)
<8>:<add> self.embedding_dimensions = embedding_dimensions
<s>zure OpenAI
    gpt4v_model: str,
    embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
    embedding_model: str,
+   embedding_dimensions: int,
    sourcepage_field: str,
    content_field: str,
    query_language: str,
    query_speller: str,
    vision_endpoint: str,
    vision_token_provider: Callable[[], Awaitable[str]]
):
<0>     self.search_client = search_client
<1>     self.blob_container_client = blob_container_client
<2>     self.openai_client = openai_client
<3>     self.auth_helper = auth_helper
<4>     self.gpt4v_deployment = gpt4v_deployment
<5>     self.gpt4v_model = gpt4v_model
<6>     self.embedding_deployment = embedding_deployment
<7>     self.embedding_model = embedding_model
<8>     self.sourcepage_field = sourcepage_field
<9>     self.content_field = content_field
<10>    self.query_language = query_language
<11>    self.query_speller = query_speller
<12>    self.vision_endpoint = vision_endpoint
<13>    self.vision_token_provider = vision_token_provider
<14>    self.chatgpt_token_limit = get_token_limit(gpt4v_model)
<15>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False) at: typing Awaitable = _alias(collections.abc.Awaitable, 1) Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False): - def __init__(self, open_ai_model_name: str, disable_batch: bool = False): self.open_ai_model_name = open_ai_model_name + self.open_ai_dimensions = open_ai_dimensions self.disable_batch = disable_batch ===========changed ref 2=========== <s> scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): def __init__( + self, + open_ai_model_name: str, + open_ai_dimensions: int, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, - self, open_ai_model_name: str, credential: str, organization: Optional[str] = None, disable_batch: bool = False ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.credential = credential self.organization = organization ===========changed ref 3=========== # module: tests.mocks + MOCK_EMBEDDING_DIMENSIONS = 1536 + MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002" + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def __init__( self, open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, + open_ai_dimensions: int, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool = False, ): + super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch) - super().__init__(open_ai_model_name, disable_batch) self.open_ai_service = open_ai_service self.open_ai_deployment = open_ai_deployment self.credential = credential ===========changed ref 5=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, use_int_vectorization: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.use_int_vectorization = use_int_vectorization self.embeddings = embeddings + # Integrated vectorization uses the ada-002 model with 1536 dimensions + self.embedding_dimensions = self.embeddings.open_ai_dimensions if self.embeddings else 1536 self.search_images = search_images ===========changed ref 6=========== <s>ai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, 
query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, + embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], ): self.search_client = search_client self.openai_client = openai_client self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.openai_host = openai_host self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider ===========changed ref 7=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", + embedding_model=MOCK_EMBEDDING_MODEL_NAME, + embedding_dimensions=MOCK_EMBEDDING_DIMENSIONS, - embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 8=========== <s>ai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" + embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.embedding_model = embedding_model + self.embedding_dimensions = embedding_dimensions self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller
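Several of these constructors accept `vision_token_provider: Callable[[], Awaitable[str]]`. A sketch of what such a provider can look like, assuming azure-identity's async credential; the token scope shown is an assumption, since the sample's actual wiring is not part of this record:

```python
from typing import Awaitable, Callable

from azure.identity.aio import DefaultAzureCredential

def make_vision_token_provider(credential: DefaultAzureCredential) -> Callable[[], Awaitable[str]]:
    async def get_token() -> str:
        # get_token returns an AccessToken; the approaches only need its string value.
        token = await credential.get_token("https://cognitiveservices.azure.com/.default")
        return token.token
    return get_token
```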
tests.test_blob_manager/test_upload_and_remove_all
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<2>:<del> print(f.content.name)
# module: tests.test_blob_manager
@pytest.mark.asyncio
@pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher")
async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager):
<0>     with NamedTemporaryFile(suffix=".pdf") as temp_file:
<1>         f = File(temp_file.file)
<2>         print(f.content.name)
<3>         filename = os.path.basename(f.content.name)
<4>
<5>         # Set up mocks used by upload_blob
<6>         async def mock_exists(*args, **kwargs):
<7>             return True
<8>
<9>         monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists)
<10>
<11>        async def mock_upload_blob(self, name, *args, **kwargs):
<12>            assert name == filename
<13>            return True
<14>
<15>        monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob)
<16>
<17>        await blob_manager.upload_blob(f)
<18>
<19>        # Set up mocks used by remove_blob
<20>        def mock_list_blob_names(*args, **kwargs):
<21>            assert kwargs.get("name_starts_with") is None
<22>
<23>            class AsyncBlobItemsIterator:
<24>                def __init__(self, file):
<25>                    self.files = [file]
<26>
<27>                def __aiter__(self):
<28>                    return self
<29>
<30>                async def __anext__(self):
<31>                    if self.files:
<32>                        return self.files.pop()
<33>                    raise StopAsyncIteration
<34>
<35>            return AsyncBlobItemsIterator(filename)
<36>
<37>        monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names)
<38>
<39>        async def mock_delete_blob(self, name, *args, **kwargs):
<40>            assert name == filename
<41>            return True
<42>
<43>        monkeypatch.</s>
===========below chunk 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): # offset: 1 await blob_manager.remove_blob() ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: sys version_info: _version_info at: sys._version_info major: int minor: int micro: int releaselevel: str serial: int ===========unchanged ref 1=========== at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[bytes] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 1=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 2=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 3=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 4=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response +
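The `AsyncBlobItemsIterator` nested in the test above implements the async-iterator protocol that the Azure SDK's paged results expose. A standalone, runnable restatement of the same protocol (`AsyncItemsIterator` is an illustrative name):

```python
import asyncio

class AsyncItemsIterator:
    """Minimal __aiter__/__anext__ pair, mirroring AsyncBlobItemsIterator above."""
    def __init__(self, items):
        self.items = list(items)

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.items:
            return self.items.pop(0)
        raise StopAsyncIteration

async def main():
    async for name in AsyncItemsIterator(["a.pdf", "b.pdf"]):
        print(name)

asyncio.run(main())
```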
tests.test_content_file/test_content_file
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<0>:<del> class MockAiohttpClientResponse404(aiohttp.ClientResponse):
<1>:<del>     def __init__(self, url, body_bytes, headers=None):
<2>:<del>         self._body = body_bytes
<3>:<del>         self._headers = headers
<4>:<del>         self._cache = {}
<5>:<del>         self.status = 404
<6>:<del>         self.reason = "Not Found"
<7>:<del>         self._url = url
<8>:<del>
<9>:<del> class MockAiohttpClientResponse(aiohttp.ClientResponse):
<10>:<del>    def __init__(self, url, body_bytes, headers=None):
<11>:<del>        self._body = body_bytes
<12>:<del>        self._headers = headers
<13>:<del>        self._cache = {}
<14>:<del>        self.status = 200
<15>:<del>        self.reason = "OK"
<16>:<del>        self._url = url
<17>:<del>
<20>:<add> if request.url.endswith("notfound.pdf") or request.url.endswith("userdoc.pdf"):
    <del> if request.url.endswith("notfound.pdf"):
<48>:<del> # Then we can plug this into any SDK via kwargs:
# module: tests.test_content_file
@pytest.mark.asyncio
async def test_content_file(monkeypatch, mock_env, mock_acs_search):
<0>     class MockAiohttpClientResponse404(aiohttp.ClientResponse):
<1>         def __init__(self, url, body_bytes, headers=None):
<2>             self._body = body_bytes
<3>             self._headers = headers
<4>             self._cache = {}
<5>             self.status = 404
<6>             self.reason = "Not Found"
<7>             self._url = url
<8>
<9>     class MockAiohttpClientResponse(aiohttp.ClientResponse):
<10>        def __init__(self, url, body_bytes, headers=None):
<11>            self._body = body_bytes
<12>            self._headers = headers
<13>            self._cache = {}
<14>            self.status = 200
<15>            self.reason = "OK"
<16>            self._url = url
<17>
<18>     class MockTransport(AsyncHttpTransport):
<19>        async def send(self, request: HttpRequest, **kwargs) -> AioHttpTransportResponse:
<20>            if request.url.endswith("notfound.pdf"):
<21>                raise ResourceNotFoundError(MockAiohttpClientResponse404(request.url, b""))
<22>            else:
<23>                return AioHttpTransportResponse(
<24>                    request,
<25>                    MockAiohttpClientResponse(
<26>                        request.url,
<27>                        b"test content",
<28>                        {
<29>                            "Content-Type": "application/octet-stream",
<30>                            "Content-Range": "bytes 0-27/28",
<31>                            "Content-Length": "28",
<32>                        },
<33>                    ),
<34>                )
<35>
<36>        async def __aenter__(self):
<37>            return self
<38>
<39>        async def __aexit__(self, *args):
<40>            pass
<41>
<42>        async def open(self):
<43>            pass
<44>
<45>        async def close(self):
<46>            pass
<47>
<48>     # Then we can plug this into any SDK via kwargs:
<49>     blob_client = BlobServiceClient(
<50>         f"https://{</s>
===========below chunk 0=========== # module: tests.test_content_file @pytest.mark.asyncio async def test_content_file(monkeypatch, mock_env, mock_acs_search): # offset: 1 credential=MockAzureCredential(), transport=MockTransport(), retry_total=0, # Necessary to avoid unnecessary network requests during tests ) blob_container_client = blob_client.get_container_client(os.environ["AZURE_STORAGE_CONTAINER"]) quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"blob_container_client": blob_container_client}) client = test_app.test_client() response = await client.get("/content/notfound.pdf") assert response.status_code == 404 response = await client.get("/content/role_library.pdf") assert response.status_code == 200 assert response.headers["Content-Type"] == "application/pdf" assert await response.get_data() == b"test content" response = await client.get("/content/role_library.pdf#page=10") assert response.status_code == 200 assert response.headers["Content-Type"] == "application/pdf" assert await response.get_data() == b"test content" ===========unchanged ref 0=========== at: aiohttp.client_reqrep ClientResponse(method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession") at: aiohttp.client_reqrep.ClientResponse version = None # HTTP-Version status: int = None # type: ignore[assignment] # Status-Code reason = None # Reason-Phrase content: StreamReader = None # type: ignore[assignment] # Payload stream _headers: "CIMultiDictProxy[str]" = None # type: ignore[assignment] _raw_headers: RawHeaders = None # type: ignore[assignment] # Response raw headers _connection = None # current connection _source_traceback: Optional[traceback.StackSummary] = None _closed = True # to allow __del__ for non-initialized properly response _released = False __init__(self, method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession") -> None ===========changed ref 0=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 1=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 2=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 3=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 4=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 5=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): with NamedTemporaryFile(suffix=".pdf") as temp_file: f = File(temp_file.file) - print(f.content.name) filename = os.path.basename(f.content.name) # Set up mocks used 
by upload_blob async def mock_exists(*args, **kwargs): return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) async def mock_upload_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) await blob_manager.upload_blob(f) # Set up mocks used by remove_blob def mock_list_blob_names(*args, **kwargs): assert kwargs.get("name_starts_with") is None class AsyncBlobItemsIterator: def __init__(self, file): self.files = [file] def __aiter__(self): return self async def __anext__(self): if self.files: return self.files.pop() raise StopAsyncIteration return AsyncBlobItemsIterator(filename) monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) async def mock_delete_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) await blob_manager.remove_blob()
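The test above works because azure-core lets a client's entire HTTP layer be swapped via the `transport` keyword. A hedged sketch of that wiring, reusing the test's `MockTransport`; the account URL and container name are illustrative:

```python
from azure.storage.blob.aio import BlobServiceClient

blob_client = BlobServiceClient(
    "https://testaccount.blob.core.windows.net",  # illustrative account URL
    credential=None,                # the test passes a MockAzureCredential here
    transport=MockTransport(),      # azure-core routes every request through this
    retry_total=0,                  # avoid retry-driven extra sends against the mock
)
container_client = blob_client.get_container_client("content")  # illustrative container
```

Setting `retry_total=0` matters: without it, the retry policy would re-send failed requests through the mock transport and distort the test's request counts.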
tests.test_searchmanager/test_remove_content
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<0>:<del> class AsyncSearchResultsIterator:
<1>:<del>     def __init__(self):
<2>:<del>         self.results = [
<3>:<del>             {
<4>:<del>                 "@search.score": 1,
<5>:<del>                 "id": "file-foo_pdf-666F6F2E706466-page-0",
<6>:<del>                 "content": "test content",
<7>:<del>                 "category": "test",
<8>:<del>                 "sourcepage": "foo.pdf#page=1",
<9>:<del>                 "sourcefile": "foo.pdf",
<10>:<del>            }
<11>:<del>        ]
<12>:<del>
<13>:<del>    def __aiter__(self):
<14>:<del>        return self
<15>:<del>
<16>:<del>    async def __anext__(self):
<17>:<del>        if len(self.results) == 0:
<18>:<del>            raise StopAsyncIteration
<19>:<del>        return self.results.pop()
<20>:<del>
<21>:<del>    async def get_count(self):
<22>:<del>        return len(self.results)
<23>:<del>
<24>:<add> search_results = AsyncSearchResultsIterator(
    <del> search_results = AsyncSearchResultsIterator()
<25>:<add> [
    <add>     {
    <add>         "@search.score": 1,
    <add>         "id": "file-foo_pdf-666F6F2E706466-page-0",
    <add>         "content": "test content",
    <add>         "category": "test",
    <add>         "sourcepage": "foo.pdf#page=1",
    <add>         "sourcefile": "foo.pdf",
    <add>     }
    <add> ]
    <add> )
# module: tests.test_searchmanager
@pytest.mark.asyncio
async def test_remove_content(monkeypatch, search_info):
<0>     class AsyncSearchResultsIterator:
<1>         def __init__(self):
<2>             self.results = [
<3>                 {
<4>                     "@search.score": 1,
<5>                     "id": "file-foo_pdf-666F6F2E706466-page-0",
<6>                     "content": "test content",
<7>                     "category": "test",
<8>                     "sourcepage": "foo.pdf#page=1",
<9>                     "sourcefile": "foo.pdf",
<10>                }
<11>            ]
<12>
<13>        def __aiter__(self):
<14>            return self
<15>
<16>        async def __anext__(self):
<17>            if len(self.results) == 0:
<18>                raise StopAsyncIteration
<19>            return self.results.pop()
<20>
<21>        async def get_count(self):
<22>            return len(self.results)
<23>
<24>     search_results = AsyncSearchResultsIterator()
<25>
<26>     searched_filters = []
<27>
<28>     async def mock_search(self, *args, **kwargs):
<29>         self.filter = kwargs.get("filter")
<30>         searched_filters.append(self.filter)
<31>         return search_results
<32>
<33>     monkeypatch.setattr(SearchClient, "search", mock_search)
<34>
<35>     deleted_documents = []
<36>
<37>     async def mock_delete_documents(self, documents):
<38>         deleted_documents.extend(documents)
<39>         return documents
<40>
<41>     monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents)
<42>
<43>     manager = SearchManager(search_info)
<44>
<45>     await manager.remove_content("foo.pdf")
<46>
<47>     assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)"
<48>     assert searched_filters[0] == "sourcefile eq 'foo.pdf'"
<49>     assert len(deleted_documents) == 1, "It should have deleted one document"</s>
===========below chunk 0=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_remove_content(monkeypatch, search_info): # offset: 1 ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: tests.test_searchmanager AsyncSearchResultsIterator(results) at: tests.test_searchmanager.AsyncSearchResultsIterator.__init__ self.results = results at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 1=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 2=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __anext__(self): + if len(self.results) == 0: + raise StopAsyncIteration + return self.results.pop() + ===========changed ref 3=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 4=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 5=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 6=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 8=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 9=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 10=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 11=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 12=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 13=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 14=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 15=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_indexer_client(self) -> SearchIndexerClient: + return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 16=========== + # module: app.backend.prepdocslib.strategy + 
class SearchInfo: + def create_search_index_client(self) -> SearchIndexClient: + return SearchIndexClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 17=========== + # module: app.backend.prepdocslib.strategy + USER_AGENT = "azure-search-chat-demo/1.0.0" + ===========changed ref 18=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): + self.endpoint = endpoint + self.credential = credential + self.index_name = index_name + ===========changed ref 19=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_client(self) -> SearchClient: + return SearchClient(endpoint=self.endpoint, index_name=self.index_name, credential=self.credential) + ===========changed ref 20=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + """ + Class representing a connection to a search service + To learn more, please visit https://learn.microsoft.com/azure/search/search-what-is-azure-search + """ + ===========changed ref 21=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + """ + Abstract strategy for ingesting documents into a search service. It has a single setup step to perform any required initialization, and then a run step that actually ingests documents into the search service. + """ + ===========changed ref 22=========== # module: tests.test_content_file + class MockAiohttpClientResponse(aiohttp.ClientResponse): + def __init__(self, url, body_bytes, headers=None): + self._body = body_bytes + self._headers = headers + self._cache = {} + self.status = 200 + self.reason = "OK" + self._url = url + ===========changed ref 23=========== # module: tests.test_content_file + class MockAiohttpClientResponse404(aiohttp.ClientResponse): + def __init__(self, url, body_bytes, headers=None): + self._body = body_bytes + self._headers = headers + self._cache = {} + self.status = 404 + self.reason = "Not Found" + self._url = url + ===========changed ref 24=========== # module: tests.test_content_file + @pytest.mark.asyncio + async def test_content_file_useruploaded_notfound(monkeypatch, auth_client, mock_blob_container_client): + class MockBlobClient: + async def download_blob(self): + raise ResourceNotFoundError(MockAiohttpClientResponse404("userdoc.pdf", b"")) + + monkeypatch.setattr( + azure.storage.blob.aio.ContainerClient, "get_blob_client", lambda *args, **kwargs: MockBlobClient() + ) + + async def mock_download_file(self): + raise ResourceNotFoundError(MockAiohttpClientResponse404("userdoc.pdf", b"")) + + monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "download_file", mock_download_file) + + response = await auth_client.get("/content/userdoc.pdf", headers={"Authorization": "Bearer test"}) + assert response.status_code == 404 +
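With the iterator parametrized, each test can now declare its result set inline instead of hard-coding it in a nested class. A small usage sketch, assuming the refactored `AsyncSearchResultsIterator` defined above:

```python
import asyncio

async def demo():
    it = AsyncSearchResultsIterator(
        [{"@search.score": 1, "id": "file-foo_pdf-666F6F2E706466-page-0", "sourcefile": "foo.pdf"}]
    )
    ids = [doc["id"] async for doc in it]
    assert ids == ["file-foo_pdf-666F6F2E706466-page-0"]

asyncio.run(demo())
```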
tests.test_searchmanager/test_remove_content_only_oid
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<0>:<del> class AsyncSearchResultsIterator: <1>:<del> def __init__(self): <2>:<del> self.results = [ <3>:<del> { <4>:<del> "@search.score": 1, <5>:<del> "id": "file-foo_pdf-666", <6>:<del> "content": "test content", <7>:<del> "category": "test", <8>:<del> "sourcepage": "foo.pdf#page=1", <9>:<del> "sourcefile": "foo.pdf", <10>:<del> "oids": [], <11>:<del> }, <12>:<del> { <13>:<del> "@search.score": 1, <14>:<del> "id": "file-foo_pdf-333", <15>:<del> "content": "test content", <16>:<del> "category": "test", <17>:<del> "sourcepage": "foo.pdf#page=1", <18>:<del> "sourcefile": "foo.pdf", <19>:<del> "oids": ["A-USER-ID", "B-USER-ID"], <20>:<del> }, <21>:<del> { <22>:<del> "@search.score": 1, <23>:<del> "id": "file-foo_pdf-222", <24>:<del> "content": "test content", <25>:<del> "category": "test", <26>:<del> "sourcepage": "foo.pdf#page=1", <27>:<del> "sourcefile": "foo.pdf", <28>:<del> "oids": ["A-USER-ID"], <29>:<del> }, <30>:<del> ] <31>:<del> <32>:<del> def __aiter__(self): <33>:<del> return self <34>:<del> <35>:<del> async def __anext__(self): <36>:<del> if len(self.results) == 0: <37>:<del> raise StopAsyncIteration <38>:<del> return self.results.pop() <39>:<del> <40>:<del> async def get_count(self): <41>:<del> return len(self.results) <42>:<del> <43>:<add> search_results = AsyncSearchResultsIterator( <del> search_results = AsyncSearchResultsIterator() <44>:<add> [ <add> { <add> "@search.score": 1, <add> "id": "file-foo_pdf-666", <add> "content": "test content", <add> "category": "test", <add> "sourcepage": "foo.pdf#page=1", <add> "sourcefile": "foo.pdf", <add> "oids": [], <add> }, <add> { <add> "@search.score": 1, <add> "id": "file-foo_pdf-333", <add> "content": "test content", <add> "category": "test", <add> "sourcepage": "foo.pdf#page=1
# module: tests.test_searchmanager @pytest.mark.asyncio async def test_remove_content_only_oid(monkeypatch, search_info): <0> class AsyncSearchResultsIterator: <1> def __init__(self): <2> self.results = [ <3> { <4> "@search.score": 1, <5> "id": "file-foo_pdf-666", <6> "content": "test content", <7> "category": "test", <8> "sourcepage": "foo.pdf#page=1", <9> "sourcefile": "foo.pdf", <10> "oids": [], <11> }, <12> { <13> "@search.score": 1, <14> "id": "file-foo_pdf-333", <15> "content": "test content", <16> "category": "test", <17> "sourcepage": "foo.pdf#page=1", <18> "sourcefile": "foo.pdf", <19> "oids": ["A-USER-ID", "B-USER-ID"], <20> }, <21> { <22> "@search.score": 1, <23> "id": "file-foo_pdf-222", <24> "content": "test content", <25> "category": "test", <26> "sourcepage": "foo.pdf#page=1", <27> "sourcefile": "foo.pdf", <28> "oids": ["A-USER-ID"], <29> }, <30> ] <31> <32> def __aiter__(self): <33> return self <34> <35> async def __anext__(self): <36> if len(self.results) == 0: <37> raise StopAsyncIteration <38> return self.results.pop() <39> <40> async def get_count(self): <41> return len(self.results) <42> <43> search_results = AsyncSearchResultsIterator() <44> <45> searched_filters = [] <46> <47> async def mock_search(self, *args, **kwargs): <48> self.filter = kwargs.get("filter") <49> searched_filters.append(self.filter) <50> </s>
===========below chunk 0=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_remove_content_only_oid(monkeypatch, search_info): # offset: 1 monkeypatch.setattr(SearchClient, "search", mock_search) deleted_documents = [] async def mock_delete_documents(self, documents): deleted_documents.extend(documents) return documents monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) manager = SearchManager( search_info, ) await manager.remove_content("foo.pdf", only_oid="A-USER-ID") assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)" assert searched_filters[0] == "sourcefile eq 'foo.pdf'" assert len(deleted_documents) == 1, "It should have deleted one document" assert deleted_documents[0]["id"] == "file-foo_pdf-222" ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: tests.test_searchmanager AsyncSearchResultsIterator(results) at: tests.test_searchmanager.test_remove_content_no_docs search_results = AsyncSearchResultsIterator([]) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 1=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 2=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 3=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __anext__(self): + if len(self.results) == 0: + raise StopAsyncIteration + return self.results.pop() + ===========changed ref 4=========== # module: tests.test_searchmanager + @pytest.mark.asyncio + async def test_remove_content_no_docs(monkeypatch, search_info): + search_results = AsyncSearchResultsIterator([]) + + async def mock_search(self, *args, **kwargs): + return search_results + + monkeypatch.setattr(SearchClient, "search", mock_search) + + deleted_calls = [] + + async def mock_delete_documents(self, documents): + deleted_calls.append(documents) + return documents + + monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) + + manager = SearchManager(search_info) + await manager.remove_content("foobar.pdf") + + assert len(deleted_calls) == 0, "It should have made zero calls to delete_documents" + ===========changed ref 5=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 6=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 7=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 8=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_remove_content(monkeypatch, search_info): - class AsyncSearchResultsIterator: - def __init__(self): - self.results = [ - { - "@search.score": 1, - "id": 
"file-foo_pdf-666F6F2E706466-page-0", - "content": "test content", - "category": "test", - "sourcepage": "foo.pdf#page=1", - "sourcefile": "foo.pdf", - } - ] - - def __aiter__(self): - return self - - async def __anext__(self): - if len(self.results) == 0: - raise StopAsyncIteration - return self.results.pop() - - async def get_count(self): - return len(self.results) - + search_results = AsyncSearchResultsIterator( - search_results = AsyncSearchResultsIterator() + [ + { + "@search.score": 1, + "id": "file-foo_pdf-666F6F2E706466-page-0", + "content": "test content", + "category": "test", + "sourcepage": "foo.pdf#page=1", + "sourcefile": "foo.pdf", + } + ] + ) searched_filters = [] async def mock_search(self, *args, **kwargs): self.filter = kwargs.get("filter") searched_filters.append(self.filter) return search_results monkeypatch.setattr(SearchClient, "search", mock_search) deleted_documents = [] async def mock_delete_documents(self, documents): deleted_documents.extend(documents) return documents monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) </s> ===========changed ref 9=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_remove_content(monkeypatch, search_info): # offset: 1 <s> return documents monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) manager = SearchManager(search_info) await manager.remove_content("foo.pdf") assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)" assert searched_filters[0] == "sourcefile eq 'foo.pdf'" assert len(deleted_documents) == 1, "It should have deleted one document" assert deleted_documents[0]["id"] == "file-foo_pdf-666F6F2E706466-page-0" ===========changed ref 10=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 12=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 13=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 14=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 15=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 16=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 17=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 +
app.backend.decorators/authenticated_path
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<22>:<add> return await route_fn(path, auth_claims) <del> return await route_fn(path)
# module: app.backend.decorators + def authenticated_path(route_fn: Callable[[str, Dict[str, Any]], Any]): - def authenticated_path(route_fn: Callable[[str], Any]): <0> """ <1> Decorator for routes that request a specific file that might require access control enforcement <2> """ <3> <4> @wraps(route_fn) <5> async def auth_handler(path=""): <6> # If authentication is enabled, validate the user can access the file <7> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <8> search_client = current_app.config[CONFIG_SEARCH_CLIENT] <9> authorized = False <10> try: <11> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <12> authorized = await auth_helper.check_path_auth(path, auth_claims, search_client) <13> except AuthError: <14> abort(403) <15> except Exception as error: <16> logging.exception("Problem checking path auth %s", error) <17> return error_response(error, route="/content") <18> <19> if not authorized: <20> abort(403) <21> <22> return await route_fn(path) <23> <24> return auth_handler <25>
===========unchanged ref 0=========== at: config CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" at: core.authentication AuthError(error, status_code) at: error error_response(error: Exception, route: str, status_code: int=500) at: functools wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T] at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing Callable = _CallableType(collections.abc.Callable, 2) Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 1=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 5=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 6=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 7=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 8=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 9=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 10=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 11=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 12=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 13=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 14=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 15=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 16=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 17=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 18=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 19=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 20=========== # module: tests.mocks 
+ class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 21=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 22=========== + # module: app.backend.prepdocslib.page + class SplitPage: + def __init__(self, page_num: int, text: str): + self.page_num = page_num + self.text = text + ===========changed ref 23=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_indexer_client(self) -> SearchIndexerClient: + return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 24=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_index_client(self) -> SearchIndexClient: + return SearchIndexClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 25=========== + # module: app.backend.prepdocslib.strategy + USER_AGENT = "azure-search-chat-demo/1.0.0" + ===========changed ref 26=========== + # module: app.backend.prepdocslib.page + class SplitPage: + """ + A section of a page that has been split into a smaller chunk. + """ + ===========changed ref 27=========== + # module: app.backend.prepdocslib.page + class Page: + def __init__(self, page_num: int, offset: int, text: str): + self.page_num = page_num + self.offset = offset + self.text = text + ===========changed ref 28=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def __init__(self, endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str): + self.endpoint = endpoint + self.credential = credential + self.index_name = index_name + ===========changed ref 29=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __anext__(self): + if len(self.results) == 0: + raise StopAsyncIteration + return self.results.pop() + ===========changed ref 30=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_client(self) -> SearchClient: + return SearchClient(endpoint=self.endpoint, index_name=self.index_name, credential=self.credential) + ===========changed ref 31=========== + # module: app.backend.prepdocslib.jsonparser + class JsonParser(Parser): + """ + Concrete parser that can parse JSON into Page objects. A top-level object becomes a single Page, while a top-level array becomes multiple Page objects. + """ + ===========changed ref 32=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + """ + Class representing a connection to a search service + To learn more, please visit https://learn.microsoft.com/azure/search/search-what-is-azure-search + """ + ===========changed ref 33=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + """ + Abstract strategy for ingesting documents into a search service. It has a single setup step to perform any required initialization, and then a run step that actually ingests documents into the search service. 
+ """ + ===========changed ref 34=========== # module: tests.test_content_file + class MockAiohttpClientResponse(aiohttp.ClientResponse): + def __init__(self, url, body_bytes, headers=None): + self._body = body_bytes + self._headers = headers + self._cache = {} + self.status = 200 + self.reason = "OK" + self._url = url + ===========changed ref 35=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + def parse(self, content: IO) -> AsyncGenerator[Page, None]: + data = content.read() + decoded_data = data.decode("utf-8") + text = cleanup_data(decoded_data) + yield Page(0, 0, text=text) +
scripts.auth_update/main
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<19>:<add> "http://localhost:5173/redirect",
# module: scripts.auth_update async def main(): <0> if not test_authentication_enabled(): <1> print("Not updating authentication.") <2> exit(0) <3> <4> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) <5> auth_headers = await get_auth_headers(credential) <6> <7> uri = os.getenv("BACKEND_URI") <8> client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) <9> if client_app_id: <10> client_object_id = await get_application(auth_headers, client_app_id) <11> if client_object_id: <12> print(f"Updating redirect URIs for client app ID {client_app_id}...") <13> # Redirect URIs need to be relative to the deployed application <14> payload = { <15> "publicClient": {"redirectUris": []}, <16> "spa": { <17> "redirectUris": [ <18> "http://localhost:50505/redirect", <19> f"{uri}/redirect", <20> ] <21> }, <22> "web": { <23> "redirectUris": [ <24> f"{uri}/.auth/login/aad/callback", <25> ] <26> }, <27> } <28> await update_application(auth_headers, client_object_id, payload) <29> print(f"Application update for client app id {client_app_id} complete.") <30>
===========unchanged ref 0=========== at: auth_common get_auth_headers(credential: AsyncTokenCredential) get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str] update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object) test_authentication_enabled() at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 1=========== + # module: tests.test_upload + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 5=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 11=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 12=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 13=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 14=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 15=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 16=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 17=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 18=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 19=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 20=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 21=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 22=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 23=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 24=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = 
logging.getLogger("ingester") + ===========changed ref 25=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 26=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 28=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 29=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 30=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 31=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 32=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 33=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 34=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 35=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 36=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 37=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 38=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 39=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 40=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def file_extension(self): + return os.path.splitext(self.content.name)[1] + ===========changed ref 41=========== + # module: app.backend.prepdocslib.page + class SplitPage: + def __init__(self, page_num: int, text: str): + self.page_num = page_num + self.text = text + ===========changed ref 42=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_indexer_client(self) -> SearchIndexerClient: + return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 43=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_index_client(self) -> SearchIndexClient: + return 
SearchIndexClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 44=========== + # module: app.backend.prepdocslib.strategy + USER_AGENT = "azure-search-chat-demo/1.0.0" + ===========changed ref 45=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a local filesystem + """ + ===========changed ref 46=========== + # module: app.backend.prepdocs + def main(strategy: Strategy, setup_index: bool = True): + if setup_index: + await strategy.setup() + + await strategy.run() + ===========changed ref 47=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def list_paths(self) -> AsyncGenerator[str, None]: + async for p in self._list_paths(self.path_pattern): + yield p +
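The one-line addition at marker <19> registers the Vite dev server (default port 5173) as an extra SPA redirect URI alongside the existing backend port 50505. The resulting payload shape, with the deployed URI value assumed for illustration:

===========illustrative sketch (not from the commit)===========
uri = "https://example.azurewebsites.net"  # assumed BACKEND_URI value
payload = {
    "publicClient": {"redirectUris": []},
    "spa": {
        "redirectUris": [
            "http://localhost:50505/redirect",
            "http://localhost:5173/redirect",  # added by this commit
            f"{uri}/redirect",
        ]
    },
    "web": {
        "redirectUris": [f"{uri}/.auth/login/aad/callback"]
    },
}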
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<13>:<add> blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <del> blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <14>:<add> blob: Union[BlobDownloader, DatalakeDownloader] <17>:<add> logging.info("Path not found in general Blob container: %s", path) <add> if current_app.config[CONFIG_USER_UPLOAD_ENABLED]: <add> try: <add> user_oid = auth_claims["oid"] <add> user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] <add> user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid) <add> file_client = user_directory_client.get_file_client(path) <add> blob = await file_client.download_file() <add> except ResourceNotFoundError: <add> logging.exception("Path not found in DataLake: %s", path) <del> logging.exception("Path not found: %s", path) <18>:<add> abort(404) <add> else: <add> abort(
# module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, auth_claims: Dict[str, Any]): - async def content_file(path: str): <0> """ <1> Serve content files from blob storage from within the app to keep the example self-contained. <2> *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in <3> if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control <4> if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to <5> This is also slow and memory hungry. <6> """ <7> # Remove page number from path, filename-1.txt -> filename.txt <8> # This shouldn't typically be necessary as browsers don't send hash fragments to servers <9> if path.find("#page=") > 0: <10> path_parts = path.rsplit("#page=", 1) <11> path = path_parts[0] <12> logging.info("Opening file %s", path) <13> blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <14> try: <15> blob = await blob_container_client.get_blob_client(path).download_blob() <16> except ResourceNotFoundError: <17> logging.exception("Path not found: %s", path) <18> abort(404) <19> if not blob.properties or not blob.properties.has_key("content_settings"): <20> abort(404) <21> mime_type = blob.properties["content_settings"]["content_type"] <22> if mime_type == "application/octet-stream": <23> mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" <24> blob_file = io.BytesIO() <25> await blob.readinto(blob_file) <26> blob_file.seek(0) <27> return await send_file(blob_file</s>
===========below chunk 0=========== # module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, auth_claims: Dict[str, Any]): - async def content_file(path: str): # offset: 1 ===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: config CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" at: decorators authenticated_path(route_fn: Callable[[str, Dict[str, Any]], Any]) at: io BytesIO(initial_bytes: bytes=...) at: io.BytesIO seek(self, offset: int, whence: int=..., /) -> int at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: mimetypes guess_type(url: Union[Text, PathLike[str]], strict: bool=...) -> Tuple[Optional[str], Optional[str]] ===========changed ref 0=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 1=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 3=========== + # module: tests.test_upload + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 5=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 13=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 14=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 15=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 16=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 17=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 18=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 19=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 20=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 21=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + 
buffer.write(b"test") + ===========changed ref 22=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 23=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 24=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 25=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 26=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 28=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 29=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 30=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 33=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 34=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 35=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 36=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 37=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 38=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 39=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 40=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 41=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 42=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = 
create_embedding_response - ===========changed ref 43=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 44=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def file_extension(self): + return os.path.splitext(self.content.name)[1] + ===========changed ref 45=========== + # module: app.backend.prepdocslib.page + class SplitPage: + def __init__(self, page_num: int, text: str): + self.page_num = page_num + self.text = text +
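The content_file change above turns a single blob lookup into a two-tier lookup: the shared blob container first, then, when user uploads are enabled, the caller's own Data Lake directory keyed by their oid claim. A hedged sketch of that control flow (client wiring assumed; only the download calls are shown):

===========illustrative sketch (not from the commit)===========
from azure.core.exceptions import ResourceNotFoundError

async def fetch_content(path, blob_container_client, user_container_client,
                        user_oid, user_upload_enabled):
    try:
        # Tier 1: the shared container holding pre-ingested documents.
        return await blob_container_client.get_blob_client(path).download_blob()
    except ResourceNotFoundError:
        if not user_upload_enabled:
            raise  # the route maps this to a 404
        # Tier 2: the per-user directory in the Data Lake filesystem.
        directory_client = user_container_client.get_directory_client(user_oid)
        return await directory_client.get_file_client(path).download_file()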
app.backend.app/config
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<5>:<add> "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED],
# module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): <0> return jsonify( <1> { <2> "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], <3> "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], <4> "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], <5> } <6> ) <7>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: config CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed" CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed" CONFIG_VECTOR_SEARCH_ENABLED = "vector_search_enabled" ===========changed ref 0=========== # module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, auth_claims: Dict[str, Any]): - async def content_file(path: str): """ Serve content files from blob storage from within the app to keep the example self-contained. *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to This is also slow and memory hungry. """ # Remove page number from path, filename-1.txt -> filename.txt # This shouldn't typically be necessary as browsers don't send hash fragments to servers if path.find("#page=") > 0: path_parts = path.rsplit("#page=", 1) path = path_parts[0] logging.info("Opening file %s", path) + blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] - blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] + blob: Union[BlobDownloader, DatalakeDownloader] try: blob = await blob_container_client.get_blob_client(path).download_blob() except ResourceNotFoundError: + logging.info("Path not found in general Blob container: %s", path) + if current_app.config[CONFIG_USER_UPLOAD_ENABLED]: + try: + user_oid = auth_claims["oid"] + user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] + user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid) + file_client = user_directory_client.get_file_client(path)</s> ===========changed ref 1=========== # module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, auth_claims: Dict[str, Any]): - async def content_file(path: str): # offset: 1 <s>_directory_client(user_oid) + file_client = user_directory_client.get_file_client(path) + blob = await file_client.download_file() + except ResourceNotFoundError: + logging.exception("Path not found in DataLake: %s", path) - logging.exception("Path not found: %s", path) + abort(404) + else: + abort(404) - abort(404) if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() await blob.readinto(blob_file) blob_file.seek(0) return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) ===========changed ref 2=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 5=========== + # module: tests.test_upload + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 
8=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 14=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 15=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 17=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 18=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 19=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 20=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 21=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 22=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 23=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 24=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 25=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 26=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 28=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 29=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 30=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 34=========== + # module: 
app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 35=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 36=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client +
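The config route now surfaces one more feature flag to the frontend. Illustrative response shape (values are examples; the real flags come from current_app.config):

===========illustrative sketch (not from the commit)===========
config_response = {
    "showGPT4VOptions": False,
    "showSemanticRankerOption": True,
    "showVectorOption": True,
    "showUserUpload": True,  # flag added by this commit
}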
app.backend.app/close_clients
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<2>:<add> if current_app.config.get(CONFIG_USER_BLOB_CONTAINER_CLIENT): <add> await current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT].close()
# module: app.backend.app @bp.after_app_serving async def close_clients(): <0> await current_app.config[CONFIG_SEARCH_CLIENT].close() <1> await current_app.config[CONFIG_BLOB_CONTAINER_CLIENT].close() <2>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: config CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_SEARCH_CLIENT = "search_client" ===========changed ref 0=========== # module: app.backend.app + @bp.get("/list_uploaded") + @authenticated + async def list_uploaded(auth_claims: dict[str, Any]): + user_oid = auth_claims["oid"] + user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] + files = [] + try: + all_paths = user_blob_container_client.get_paths(path=user_oid) + async for path in all_paths: + files.append(path.name.split("/", 1)[1]) + except ResourceNotFoundError as error: + if error.status_code != 404: + current_app.logger.exception("Error listing uploaded files", error) + return jsonify(files), 200 + ===========changed ref 1=========== # module: app.backend.app + @bp.post("/delete_uploaded") + @authenticated + async def delete_uploaded(auth_claims: dict[str, Any]): + request_json = await request.get_json() + filename = request_json.get("filename") + user_oid = auth_claims["oid"] + user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] + user_directory_client = user_blob_container_client.get_directory_client(user_oid) + file_client = user_directory_client.get_file_client(filename) + await file_client.delete_file() + ingester = current_app.config[CONFIG_INGESTER] + await ingester.remove_file(filename, user_oid) + return jsonify({"message": f"File {filename} deleted successfully"}), 200 + ===========changed ref 2=========== # module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): return jsonify( { "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], + "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], } ) ===========changed ref 3=========== # module: app.backend.app + @bp.post("/upload") + @authenticated + async def upload(auth_claims: dict[str, Any]): + request_files = await request.files + if "file" not in request_files: + # If no files were included in the request, return an error response + return jsonify({"message": "No file part in the request", "status": "failed"}), 400 + + user_oid = auth_claims["oid"] + file = request_files.getlist("file")[0] + user_blob_container_client: FileSystemClient = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] + user_directory_client = user_blob_container_client.get_directory_client(user_oid) + try: + await user_directory_client.get_directory_properties() + except ResourceNotFoundError: + current_app.logger.info("Creating directory for user %s", user_oid) + await user_directory_client.create_directory() + await user_directory_client.set_access_control(owner=user_oid) + file_client = user_directory_client.get_file_client(file.filename) + file_io = file + file_io.name = file.filename + file_io = io.BufferedReader(file_io) + await file_client.upload_data(file_io, overwrite=True, metadata={"UploadedBy": user_oid}) + file_io.seek(0) + ingester = current_app.config[CONFIG_INGESTER] + await ingester.add_file(File(content=file_io, acls={"oids": [user_oid]})) + return jsonify({"message": "File uploaded successfully"}), 200 + ===========changed ref 4=========== # module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, 
auth_claims: Dict[str, Any]): - async def content_file(path: str): """ Serve content files from blob storage from within the app to keep the example self-contained. *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to This is also slow and memory hungry. """ # Remove page number from path, filename-1.txt -> filename.txt # This shouldn't typically be necessary as browsers don't send hash fragments to servers if path.find("#page=") > 0: path_parts = path.rsplit("#page=", 1) path = path_parts[0] logging.info("Opening file %s", path) + blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] - blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] + blob: Union[BlobDownloader, DatalakeDownloader] try: blob = await blob_container_client.get_blob_client(path).download_blob() except ResourceNotFoundError: + logging.info("Path not found in general Blob container: %s", path) + if current_app.config[CONFIG_USER_UPLOAD_ENABLED]: + try: + user_oid = auth_claims["oid"] + user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] + user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid) + file_client = user_directory_client.get_file_client(path)</s> ===========changed ref 5=========== # module: app.backend.app @bp.route("/content/<path>") @authenticated_path + async def content_file(path: str, auth_claims: Dict[str, Any]): - async def content_file(path: str): # offset: 1 <s>_directory_client(user_oid) + file_client = user_directory_client.get_file_client(path) + blob = await file_client.download_file() + except ResourceNotFoundError: + logging.exception("Path not found in DataLake: %s", path) - logging.exception("Path not found: %s", path) + abort(404) + else: + abort(404) - abort(404) if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() await blob.readinto(blob_file) blob_file.seek(0) return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) ===========changed ref 6=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.blobmanager + +
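Because the user-upload container client is only created when the feature is enabled, the shutdown hook closes it conditionally. A minimal sketch (the "user_blob_container_client" key is assumed from the CONFIG_USER_BLOB_CONTAINER_CLIENT constant name; only the always-present clients are closed unconditionally):

===========illustrative sketch (not from the commit)===========
async def close_clients(config: dict):
    await config["search_client"].close()
    await config["blob_container_client"].close()
    # Optional client: present only when user uploads are enabled.
    user_client = config.get("user_blob_container_client")
    if user_client:
        await user_client.close()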
tests.test_authenticationhelper/test_auth_setup
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<1>:<add> result = helper.get_auth_setup_for_client() <del> assert helper.get_auth_setup_for_client() == { <2>:<del> "useLogin": True, <3>:<del> "requireAccessControl": False, <4>:<del> "msalConfig": { <5>:<del> "auth": { <6>:<del> "clientId": "CLIENT_APP", <7>:<del> "authority": "https://login.microsoftonline.com/TENANT_ID", <8>:<del> "redirectUri": "/redirect", <9>:<del> "postLogoutRedirectUri": "/", <10>:<del> "navigateToLoginRequestUrl": False, <11>:<del> }, <12>:<del> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <13>:<del> }, <14>:<del> "loginRequest": { <15>:<del> "scopes": [".default"], <16>:<del> }, <17>:<del> "tokenRequest": { <18>:<del> "scopes": ["api://SERVER_APP/access_as_user"], <19>:<del> }, <20>:<del> } <21>:<add> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
# module: tests.test_authenticationhelper + def test_auth_setup(mock_confidential_client_success, mock_validate_token_success, snapshot): - def test_auth_setup(mock_confidential_client_success, mock_validate_token_success): <0> helper = create_authentication_helper() <1> assert helper.get_auth_setup_for_client() == { <2> "useLogin": True, <3> "requireAccessControl": False, <4> "msalConfig": { <5> "auth": { <6> "clientId": "CLIENT_APP", <7> "authority": "https://login.microsoftonline.com/TENANT_ID", <8> "redirectUri": "/redirect", <9> "postLogoutRedirectUri": "/", <10> "navigateToLoginRequestUrl": False, <11> }, <12> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <13> }, <14> "loginRequest": { <15> "scopes": [".default"], <16> }, <17> "tokenRequest": { <18> "scopes": ["api://SERVER_APP/access_as_user"], <19> }, <20> } <21>
===========unchanged ref 0=========== at: tests.conftest mock_confidential_client_success(monkeypatch) at: tests.test_authenticationhelper create_authentication_helper(require_access_control: bool=False) ===========changed ref 0=========== + # module: app.backend.prepdocslib.fileprocessor + + ===========changed ref 1=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 4=========== + # module: tests.test_upload + + ===========changed ref 5=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 14=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 15=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 16=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 17=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 18=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 19=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 20=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 21=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 22=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 23=========== + # module: app.backend.prepdocslib.fileprocessor + @dataclass(frozen=True) + class FileProcessor: + parser: Parser + splitter: TextSplitter + ===========changed ref 24=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 25=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 26=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + 
def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 28=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 29=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 30=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 34=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 35=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 36=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 37=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 38=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 39=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 40=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 41=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 42=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 43=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 44=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 45=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 46=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def file_extension(self): + return os.path.splitext(self.content.name)[1] + ===========changed ref 47=========== + # module: app.backend.prepdocslib.page + class 
SplitPage: + def __init__(self, page_num: int, text: str): + self.page_num = page_num + self.text = text + ===========changed ref 48=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_indexer_client(self) -> SearchIndexerClient: + return SearchIndexerClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 49=========== + # module: app.backend.prepdocslib.strategy + class SearchInfo: + def create_search_index_client(self) -> SearchIndexClient: + return SearchIndexClient(endpoint=self.endpoint, credential=self.credential) + ===========changed ref 50=========== + # module: app.backend.prepdocslib.strategy + USER_AGENT = "azure-search-chat-demo/1.0.0" + ===========changed ref 51=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a local filesystem + """ +
tests.test_authenticationhelper/test_auth_setup_required_access_control
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<1>:<add> result = helper.get_auth_setup_for_client() <del> assert helper.get_auth_setup_for_client() == { <2>:<del> "useLogin": True, <3>:<del> "requireAccessControl": True, <4>:<del> "msalConfig": { <5>:<del> "auth": { <6>:<del> "clientId": "CLIENT_APP", <7>:<del> "authority": "https://login.microsoftonline.com/TENANT_ID", <8>:<del> "redirectUri": "/redirect", <9>:<del> "postLogoutRedirectUri": "/", <10>:<del> "navigateToLoginRequestUrl": False, <11>:<del> }, <12>:<del> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <13>:<del> }, <14>:<del> "loginRequest": { <15>:<del> "scopes": [".default"], <16>:<del> }, <17>:<del> "tokenRequest": { <18>:<del> "scopes": ["api://SERVER_APP/access_as_user"], <19>:<del> }, <20>:<del> } <21>:<add> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
# module: tests.test_authenticationhelper + def test_auth_setup_required_access_control(mock_confidential_client_success, mock_validate_token_success, snapshot): - def test_auth_setup_required_access_control(mock_confidential_client_success, mock_validate_token_success): <0> helper = create_authentication_helper(require_access_control=True) <1> assert helper.get_auth_setup_for_client() == { <2> "useLogin": True, <3> "requireAccessControl": True, <4> "msalConfig": { <5> "auth": { <6> "clientId": "CLIENT_APP", <7> "authority": "https://login.microsoftonline.com/TENANT_ID", <8> "redirectUri": "/redirect", <9> "postLogoutRedirectUri": "/", <10> "navigateToLoginRequestUrl": False, <11> }, <12> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <13> }, <14> "loginRequest": { <15> "scopes": [".default"], <16> }, <17> "tokenRequest": { <18> "scopes": ["api://SERVER_APP/access_as_user"], <19> }, <20> } <21>
===========unchanged ref 0=========== at: tests.test_authenticationhelper create_authentication_helper(require_access_control: bool=False) ===========changed ref 0=========== # module: tests.test_authenticationhelper + def test_auth_setup(mock_confidential_client_success, mock_validate_token_success, snapshot): - def test_auth_setup(mock_confidential_client_success, mock_validate_token_success): helper = create_authentication_helper() + result = helper.get_auth_setup_for_client() - assert helper.get_auth_setup_for_client() == { - "useLogin": True, - "requireAccessControl": False, - "msalConfig": { - "auth": { - "clientId": "CLIENT_APP", - "authority": "https://login.microsoftonline.com/TENANT_ID", - "redirectUri": "/redirect", - "postLogoutRedirectUri": "/", - "navigateToLoginRequestUrl": False, - }, - "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, - }, - "loginRequest": { - "scopes": [".default"], - }, - "tokenRequest": { - "scopes": ["api://SERVER_APP/access_as_user"], - }, - } + snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== + # module: app.backend.prepdocslib.fileprocessor + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 5=========== + # module: tests.test_upload + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 14=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 15=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 17=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 18=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 19=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 20=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 21=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 22=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> 
openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 23=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 24=========== + # module: app.backend.prepdocslib.fileprocessor + @dataclass(frozen=True) + class FileProcessor: + parser: Parser + splitter: TextSplitter + ===========changed ref 25=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 26=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 28=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 29=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 30=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 31=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 34=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 35=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 36=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 37=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 38=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 39=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 40=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 41=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 42=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 43=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed 
ref 44=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 45=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 46=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} +
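The two test rewrites above replace a hard-coded expected dict with snapshot assertions. For reference, a minimal sketch of that pattern, assuming the pytest-snapshot plugin (which provides the `snapshot` fixture and its `assert_match` method); the stand-in helper and snapshot file name here are illustrative:

    import json

    def get_auth_setup_for_client():
        # Stand-in for helper.get_auth_setup_for_client() in the real test.
        return {"useLogin": True, "requireAccessControl": True}

    def test_auth_setup_snapshot(snapshot):
        result = get_auth_setup_for_client()
        # First run with `pytest --snapshot-update` records result.json;
        # later runs fail if the serialized payload drifts from the snapshot.
        snapshot.assert_match(json.dumps(result, indent=4), "result.json")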
tests.conftest/mock_acs_search
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<0>:<del> monkeypatch.setattr(SearchClient, "search", mock_search)
# module: tests.conftest @pytest.fixture def mock_acs_search(monkeypatch): <0> monkeypatch.setattr(SearchClient, "search", mock_search) <1> monkeypatch.setattr(SearchClient, "search", mock_search) <2> <3> async def mock_get_index(*args, **kwargs): <4> return MockSearchIndex <5> <6> monkeypatch.setattr(SearchIndexClient, "get_index", mock_get_index) <7>
===========changed ref 0=========== + # module: app.backend.prepdocslib.embeddings + + ===========changed ref 1=========== + # module: app.backend.prepdocslib.fileprocessor + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 5=========== + # module: tests.test_upload + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 14=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 15=========== + # module: app.backend.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_client(self) -> AsyncOpenAI: + raise NotImplementedError + ===========changed ref 16=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 17=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 18=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 19=========== + # module: app.backend.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 20=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 21=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 22=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 23=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 24=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 25=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 26=========== + # module: app.backend.prepdocslib.embeddings + logger = logging.getLogger("ingester") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.fileprocessor + @dataclass(frozen=True) + class FileProcessor: + parser: Parser + splitter: TextSplitter + ===========changed ref 28=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed 
ref 29=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 30=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 32=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 34=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 35=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 36=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 37=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 38=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 39=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 40=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 41=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 42=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 43=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 44=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 45=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 46=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 47=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 48=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 49=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: 
Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 50=========== + # module: app.backend.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): + self.token_provider = token_provider + self.endpoint = endpoint + ===========changed ref 51=========== + # module: app.backend.prepdocslib.embeddings + class EmbeddingBatch: + def __init__(self, texts: List[str], token_length: int): + self.texts = texts + self.token_length = token_length + ===========changed ref 52=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def file_extension(self): + return os.path.splitext(self.content.name)[1] + ===========changed ref 53=========== + # module: app.backend.prepdocslib.page + class SplitPage: + def __init__(self, page_num: int, text: str): + self.page_num = page_num + self.text = text +
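The hunk above simply deletes a duplicated `monkeypatch.setattr` line (patching the same attribute twice is redundant). As a refresher on the pattern itself, a minimal self-contained sketch; the `SearchClient` stand-in and mock return value are illustrative, not the Azure SDK class:

    import pytest

    class SearchClient:
        # Stand-in for azure.search.documents.aio.SearchClient.
        async def search(self, *args, **kwargs):
            raise RuntimeError("would hit the network in a real test")

    async def mock_search(self, *args, **kwargs):
        return ["doc-1", "doc-2"]

    @pytest.fixture
    def mock_acs_search(monkeypatch):
        # One setattr per attribute is enough; monkeypatch undoes it at teardown.
        monkeypatch.setattr(SearchClient, "search", mock_search)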
tests.conftest/auth_client
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<5>:<add> monkeypatch.setenv("USE_USER_UPLOAD", "true") <add> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-userstorage-account") <add> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-userstorage-container") <add> monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true") <add> monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true") <add> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-documentintelligence-service")
# module: tests.conftest @pytest_asyncio.fixture(params=auth_envs) async def auth_client( monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_confidential_client_success, mock_validate_token_success, mock_list_groups_success, mock_acs_search_filter, mock_get_secret, request, ): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <5> for key, value in request.param.items(): <6> monkeypatch.setenv(key, value) <7> <8> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <9> mock_default_azure_credential.return_value = MockAzureCredential() <10> quart_app = app.create_app() <11> <12> async with quart_app.test_app() as test_app: <13> quart_app.config.update({"TESTING": True}) <14> mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) <15> mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) <16> client = test_app.test_client() <17> client.config = quart_app.config <18> <19> yield client <20>
===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_acs_search(monkeypatch): - monkeypatch.setattr(SearchClient, "search", mock_search) monkeypatch.setattr(SearchClient, "search", mock_search) async def mock_get_index(*args, **kwargs): return MockSearchIndex monkeypatch.setattr(SearchIndexClient, "get_index", mock_get_index) ===========changed ref 1=========== # module: tests.conftest envs = [ { "OPENAI_HOST": "openai", "OPENAI_API_KEY": "secretkey", "OPENAI_ORGANIZATION": "organization", }, { "OPENAI_HOST": "azure", "AZURE_OPENAI_SERVICE": "test-openai-service", "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", "USE_GPT4V": "true", "AZURE_OPENAI_GPT4V_MODEL": "gpt-4", "VISION_SECRET_NAME": "mysecret", "VISION_ENDPOINT": "https://testvision.cognitiveservices.azure.com/", "AZURE_KEY_VAULT_NAME": "mykeyvault", }, ] auth_envs = [ { "OPENAI_HOST": "azure", "AZURE_OPENAI_SERVICE": "test-openai-service", "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", "AZURE_USE_AUTHENTICATION": "true", + "AZURE_USER_STORAGE_ACCOUNT": "test-user-storage-account", + "AZURE_USER_STORAGE_CONTAINER": "test-user-storage-container", "AZURE_SERVER_APP_ID": "SERVER_APP", "AZURE_SERVER_APP_SECRET": "SECRET", "AZURE_CLIENT_APP_ID": "CLIENT_APP", "AZURE_TENANT_ID": "TENANT_ID", }, ] ===========changed ref 2=========== + # module: app.backend.prepdocslib.embeddings + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.fileprocessor + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 5=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 6=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 7=========== + # module: tests.test_upload + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 14=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 15=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 16=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 17=========== + # module: app.backend.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_client(self) -> AsyncOpenAI: + raise NotImplementedError + ===========changed ref 18=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 19=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 20=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 21=========== + # module: app.backend.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 22=========== # module: 
tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 23=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 24=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 25=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 26=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 27=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 28=========== + # module: app.backend.prepdocslib.embeddings + logger = logging.getLogger("ingester") + ===========changed ref 29=========== + # module: app.backend.prepdocslib.fileprocessor + @dataclass(frozen=True) + class FileProcessor: + parser: Parser + splitter: TextSplitter + ===========changed ref 30=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 34=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 35=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 36=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 37=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 38=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 39=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 40=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 41=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 42=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 43=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 44=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if 
self.content: + self.content.close() + ===========changed ref 45=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) +
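The auth_client fixture above drives app configuration entirely through environment variables: base values, feature flags for the new upload path, and per-parameterization overrides layered on top. A minimal sketch of that layering, with variable names borrowed from the fixture for illustration:

    import pytest

    auth_envs = [{"AZURE_USE_AUTHENTICATION": "true"}]

    @pytest.fixture(params=auth_envs)
    def configured_env(monkeypatch, request):
        # Base configuration shared by every parameterization.
        monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
        # Feature flag for the optional upload path.
        monkeypatch.setenv("USE_USER_UPLOAD", "true")
        # Per-parameterization overrides come last so they win.
        for key, value in request.param.items():
            monkeypatch.setenv(key, value)
        yield  # monkeypatch restores the original environment afterwards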
scripts.auth_init/create_client_app_payload
Modified
Azure-Samples~azure-search-openai-demo
853cc726ca9d6e0d10b595024fef4f5fd70826b4
Optional upload documents feature (#1395)
<7>:<add> "spa": {"redirectUris": ["http://localhost:50505/redirect", "http://localhost:5173/redirect"]}, <del> "spa": {"redirectUris": ["http://localhost:50505/redirect"]},
# module: scripts.auth_init def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int): <0> return { <1> "displayName": f"Azure Search OpenAI Chat Client App {identifier}", <2> "signInAudience": "AzureADMyOrg", <3> "web": { <4> "redirectUris": ["http://localhost:50505/.auth/login/aad/callback"], <5> "implicitGrantSettings": {"enableIdTokenIssuance": True}, <6> }, <7> "spa": {"redirectUris": ["http://localhost:50505/redirect"]}, <8> "requiredResourceAccess": [ <9> # access_as_user from server app <10> { <11> "resourceAppId": server_app_id, <12> "resourceAccess": [ <13> { <14> "id": server_app_permission_setup_payload["api"]["oauth2PermissionScopes"][0]["id"], <15> "type": "Scope", <16> } <17> ], <18> }, <19> # Graph User.Read <20> { <21> "resourceAppId": "00000003-0000-0000-c000-000000000000", <22> "resourceAccess": [{"id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d", "type": "Scope"}], <23> }, <24> ], <25> } <26>
===========unchanged ref 0=========== at: typing Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== + # module: app.backend.prepdocslib.searchmanager + + ===========changed ref 1=========== + # module: app.backend.prepdocslib.embeddings + + ===========changed ref 2=========== + # module: app.backend.prepdocslib.fileprocessor + + ===========changed ref 3=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + + ===========changed ref 4=========== + # module: app.backend.prepdocslib.filestrategy + + ===========changed ref 5=========== + # module: app.backend.prepdocslib.blobmanager + + ===========changed ref 6=========== + # module: tests.test_upload + + ===========changed ref 7=========== + # module: app.backend.prepdocslib.listfilestrategy + + ===========changed ref 8=========== + # module: app.backend.prepdocslib.textsplitter + + ===========changed ref 9=========== + # module: app.backend.prepdocslib.parser + + ===========changed ref 10=========== + # module: app.backend.prepdocslib.pdfparser + + ===========changed ref 11=========== + # module: app.backend.prepdocslib.htmlparser + + ===========changed ref 12=========== + # module: app.backend.prepdocslib.page + + ===========changed ref 13=========== + # module: app.backend.prepdocslib.textparser + + ===========changed ref 14=========== + # module: app.backend.prepdocslib.jsonparser + + ===========changed ref 15=========== + # module: app.backend.prepdocslib.strategy + + ===========changed ref 16=========== + # module: app.backend.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_client(self) -> AsyncOpenAI: + raise NotImplementedError + ===========changed ref 17=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 18=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def run(self): + raise NotImplementedError + ===========changed ref 19=========== + # module: app.backend.prepdocslib.strategy + class Strategy(ABC): + def setup(self): + raise NotImplementedError + ===========changed ref 20=========== + # module: app.backend.prepdocslib.embeddings + class ExtraArgs(TypedDict, total=False): + dimensions: int + ===========changed ref 21=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + ===========changed ref 22=========== # module: tests.test_searchmanager + class AsyncSearchResultsIterator: + def get_count(self): + return len(self.results) + ===========changed ref 23=========== # module: tests.mocks class MockAsyncSearchResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 24=========== + # module: app.backend.prepdocslib.htmlparser + class LocalHTMLParser(Parser): + """Parses HTML text into Page objects.""" + ===========changed ref 25=========== # module: tests.mocks + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 26=========== # module: tests.mocks class MockBlob: + def readinto(self, buffer: BytesIO): + buffer.write(b"test") + ===========changed ref 27=========== + # module: app.backend.prepdocslib.searchmanager + logger = logging.getLogger("ingester") + ===========changed ref 28=========== + # module: app.backend.prepdocslib.embeddings + logger = logging.getLogger("ingester") + ===========changed ref 
29=========== + # module: app.backend.prepdocslib.fileprocessor + @dataclass(frozen=True) + class FileProcessor: + parser: Parser + splitter: TextSplitter + ===========changed ref 30=========== + # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("ingester") + ===========changed ref 31=========== + # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("ingester") + ===========changed ref 32=========== + # module: app.backend.prepdocs + logger = logging.getLogger("ingester") + ===========changed ref 33=========== + # module: app.backend.prepdocslib.blobmanager + class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 34=========== + # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("ingester") + ===========changed ref 35=========== + # module: app.backend.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str): + self.path_pattern = path_pattern + ===========changed ref 36=========== + # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("ingester") + ===========changed ref 37=========== + # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("ingester") + ===========changed ref 38=========== + # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("ingester") + ===========changed ref 39=========== + # module: app.backend.prepdocslib.htmlparser + logger = logging.getLogger("ingester") + ===========changed ref 40=========== + # module: app.backend.prepdocslib.textparser + class TextParser(Parser): + """Parses simple text into a Page object.""" + ===========changed ref 41=========== # module: tests.test_searchmanager - class MockEmbeddingsClient: - def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: - return self.create_embedding_response - ===========changed ref 42=========== # module: tests.mocks + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 43=========== # module: tests.test_searchmanager - class MockClient: - def __init__(self, embeddings_client): - self.embeddings = embeddings_client - ===========changed ref 44=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 45=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 46=========== + # module: app.backend.prepdocslib.textsplitter + class SimpleTextSplitter(TextSplitter): + def __init__(self, max_object_length: int = 1000): + self.max_object_length = max_object_length + ===========changed ref 47=========== + # module: app.backend.prepdocslib.strategy + class DocumentAction(Enum): + Add = 0 + Remove = 1 + RemoveAll = 2 + ===========changed ref 48=========== # module: tests.mocks + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 49=========== + # module: app.backend.prepdocslib.parser + class Parser(ABC): + """ + Abstract parser that parses content into Page objects + """ + ===========changed ref 50=========== # module: tests.test_searchmanager - class 
MockEmbeddingsClient: - def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): - self.create_embedding_response = create_embedding_response - ===========changed ref 51=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 52=========== + # module: app.backend.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, endpoint: str, token_provider: Callable[[], Awaitable[str]]): + self.token_provider = token_provider + self.endpoint = endpoint + ===========changed ref 53=========== + # module: app.backend.prepdocslib.embeddings + class EmbeddingBatch: + def __init__(self, texts: List[str], token_length: int): + self.texts = texts + self.token_length = token_length + ===========changed ref 54=========== + # module: app.backend.prepdocslib.listfilestrategy + class File: + def file_extension(self): + return os.path.splitext(self.content.name)[1] +
scripts.auth_update/main
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<4>:<add> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"])) <del> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) <5>:<add> <add> scopes = ["https://graph.microsoft.com/.default"] <add> graph_client = GraphServiceClient(credentials=credential, scopes=scopes) <del> auth_headers = await get_auth_headers(credential) <10>:<add> client_object_id = await get_application(graph_client, client_app_id) <del> client_object_id = await get_application(auth_headers, client_app_id) <14>:<add> app = Application( <add> public_client=PublicClientApplication(redirect_uris=[]), <add> spa=SpaApplication( <del> payload = { <15>:<del> "publicClient": {"redirectUris": []}, <16>:<del> "spa": { <17>:<add> redirect_uris=[ <del> "redirectUris": [ <22>:<add> ), <del> }, <23>:<add> web=WebApplication( <del> "web": { <24>:<add> redirect_uris=[ <del> "redirectUris": [ <27>:<add> ), <del> }, <28>:<add> ) <del> } <29>:<add> await graph_client.applications.by_application_id(client_object_id).patch(app) <del> await update_application(auth_headers, client_object_id, payload)
# module: scripts.auth_update def main(): <0> if not test_authentication_enabled(): <1> print("Not updating authentication.") <2> exit(0) <3> <4> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) <5> auth_headers = await get_auth_headers(credential) <6> <7> uri = os.getenv("BACKEND_URI") <8> client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) <9> if client_app_id: <10> client_object_id = await get_application(auth_headers, client_app_id) <11> if client_object_id: <12> print(f"Updating redirect URIs for client app ID {client_app_id}...") <13> # Redirect URIs need to be relative to the deployed application <14> payload = { <15> "publicClient": {"redirectUris": []}, <16> "spa": { <17> "redirectUris": [ <18> "http://localhost:50505/redirect", <19> "http://localhost:5173/redirect", <20> f"{uri}/redirect", <21> ] <22> }, <23> "web": { <24> "redirectUris": [ <25> f"{uri}/.auth/login/aad/callback", <26> ] <27> }, <28> } <29> await update_application(auth_headers, client_object_id, payload) <30> print(f"Application update for client app id {client_app_id} complete.") <31>
===========unchanged ref 0=========== at: auth_common get_application(graph_client: GraphServiceClient, client_id: str) -> Optional[str] test_authentication_enabled() at: os environ = _createenviron() getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
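This commit swaps hand-rolled Microsoft Graph REST calls (aiohttp plus bearer-token headers) for the Graph SDK's typed request builders. A minimal sketch of the client setup and a PATCH, assuming the msgraph-sdk and azure-identity packages are installed; the object id and URI are placeholders, and the import paths follow the SDK's generated-model layout:

    from azure.identity.aio import AzureDeveloperCliCredential
    from msgraph import GraphServiceClient
    from msgraph.generated.models.application import Application
    from msgraph.generated.models.spa_application import SpaApplication

    async def update_redirect_uris(object_id: str, uri: str) -> None:
        credential = AzureDeveloperCliCredential()
        graph_client = GraphServiceClient(
            credentials=credential, scopes=["https://graph.microsoft.com/.default"]
        )
        # PATCH sends only the fields set on the model; other app settings stay as-is.
        app = Application(spa=SpaApplication(redirect_uris=[f"{uri}/redirect"]))
        await graph_client.applications.by_application_id(object_id).patch(app)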
scripts.auth_init/create_application
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<0>:<del> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1>:<del> async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: <2>:<del> if response.status != 201: <3>:<del> raise Exception(await response.json()) <4>:<del> response_json = await response.json() <5>:<del> object_id = response_json["id"] <6>:<del> client_id = response_json["appId"] <7>:<add> app = await graph_client.applications.post(request_app) <add> object_id = app.id <add> client_id = app.app_id <8>:<del> async with session.post( <9>:<del> "https://graph.microsoft.com/v1.0/servicePrincipals", <10>:<del> json={"appId": client_id, "displayName": app_payload["displayName"]}, <11>:<del> ) as response: <12>:<del> if response.status != 201: <13>:<del> raise Exception(await response.json()) <14>:<del> <15>:<add> # Create a service principal <add> request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) <add> await graph_client.service_principals.post(request_principal)
# module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1> async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: <2> if response.status != 201: <3> raise Exception(await response.json()) <4> response_json = await response.json() <5> object_id = response_json["id"] <6> client_id = response_json["appId"] <7> <8> async with session.post( <9> "https://graph.microsoft.com/v1.0/servicePrincipals", <10> json={"appId": client_id, "displayName": app_payload["displayName"]}, <11> ) as response: <12> if response.status != 201: <13> raise Exception(await response.json()) <14> <15> return object_id, client_id <16>
===========changed ref 0=========== # module: scripts.auth_update def main(): if not test_authentication_enabled(): print("Not updating authentication.") exit(0) + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"])) - credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + + scopes = ["https://graph.microsoft.com/.default"] + graph_client = GraphServiceClient(credentials=credential, scopes=scopes) - auth_headers = await get_auth_headers(credential) uri = os.getenv("BACKEND_URI") client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) if client_app_id: + client_object_id = await get_application(graph_client, client_app_id) - client_object_id = await get_application(auth_headers, client_app_id) if client_object_id: print(f"Updating redirect URIs for client app ID {client_app_id}...") # Redirect URIs need to be relative to the deployed application + app = Application( + public_client=PublicClientApplication(redirect_uris=[]), + spa=SpaApplication( - payload = { - "publicClient": {"redirectUris": []}, - "spa": { + redirect_uris=[ - "redirectUris": [ "http://localhost:50505/redirect", "http://localhost:5173/redirect", f"{uri}/redirect", ] + ), - }, + web=WebApplication( - "web": { + redirect_uris=[ - "redirectUris": [ f"{uri}/.auth/login/aad/callback", ] + ), - }, + ) - } + await graph_client.applications.by_application_id(client_object_id).patch(app) - await update_application(auth_headers, client_object_id, payload) print(f"Application update for client app id {client_app_id} complete.")
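The ported create_application above relies on model classes whose imports the diff does not show. A minimal standalone sketch of the same create-then-service-principal flow with those imports spelled out (paths assume msgraph-sdk's generated layout):

    from typing import Tuple

    from msgraph import GraphServiceClient
    from msgraph.generated.models.application import Application
    from msgraph.generated.models.service_principal import ServicePrincipal

    async def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]:
        app = await graph_client.applications.post(request_app)  # POST /applications
        # Without a service principal, the registration cannot be signed into in this tenant.
        sp = ServicePrincipal(app_id=app.app_id, display_name=app.display_name)
        await graph_client.service_principals.post(sp)  # POST /servicePrincipals
        return app.id, app.app_id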
scripts.auth_init/add_client_secret
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<0>:<del> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1>:<del> async with session.post( <2>:<del> f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", <3>:<del> json={"passwordCredential": {"displayName": "secret"}}, <4>:<del> ) as response: <5>:<del> response_json = await response.json() <6>:<del> if response.status == 200: <7>:<del> return response_json["secretText"] <8>:<add> request_password = AddPasswordPostRequestBody( <add> password_credential=PasswordCredential(display_name="WebAppSecret"), <add> ) <add> result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password) <add> return result.secret_text <9>:<del> raise Exception(response_json) <10>:<del>
# module: scripts.auth_init + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str: - def add_client_secret(auth_headers: Dict[str, str], object_id: str): <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1> async with session.post( <2> f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", <3> json={"passwordCredential": {"displayName": "secret"}}, <4> ) as response: <5> response_json = await response.json() <6> if response.status == 200: <7> return response_json["secretText"] <8> <9> raise Exception(response_json) <10>
===========changed ref 0=========== # module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: - if response.status != 201: - raise Exception(await response.json()) - response_json = await response.json() - object_id = response_json["id"] - client_id = response_json["appId"] + app = await graph_client.applications.post(request_app) + object_id = app.id + client_id = app.app_id - async with session.post( - "https://graph.microsoft.com/v1.0/servicePrincipals", - json={"appId": client_id, "displayName": app_payload["displayName"]}, - ) as response: - if response.status != 201: - raise Exception(await response.json()) - + # Create a service principal + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) + await graph_client.service_principals.post(request_principal) return object_id, client_id ===========changed ref 1=========== # module: scripts.auth_update def main(): if not test_authentication_enabled(): print("Not updating authentication.") exit(0) + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"])) - credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + + scopes = ["https://graph.microsoft.com/.default"] + graph_client = GraphServiceClient(credentials=credential, scopes=scopes) - auth_headers = await get_auth_headers(credential) uri = os.getenv("BACKEND_URI") client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) if client_app_id: + client_object_id = await get_application(graph_client, client_app_id) - client_object_id = await get_application(auth_headers, client_app_id) if client_object_id: print(f"Updating redirect URIs for client app ID {client_app_id}...") # Redirect URIs need to be relative to the deployed application + app = Application( + public_client=PublicClientApplication(redirect_uris=[]), + spa=SpaApplication( - payload = { - "publicClient": {"redirectUris": []}, - "spa": { + redirect_uris=[ - "redirectUris": [ "http://localhost:50505/redirect", "http://localhost:5173/redirect", f"{uri}/redirect", ] + ), - }, + web=WebApplication( - "web": { + redirect_uris=[ - "redirectUris": [ f"{uri}/.auth/login/aad/callback", ] + ), - }, + ) - } + await graph_client.applications.by_application_id(client_object_id).patch(app) - await update_application(auth_headers, client_object_id, payload) print(f"Application update for client app id {client_app_id} complete.")
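Likewise, add_client_secret uses AddPasswordPostRequestBody and PasswordCredential without showing their imports; these ship under the generated request-builder path in msgraph-sdk. A minimal sketch (verify the exact module path against your installed SDK version, as the generated layout has shifted between releases):

    from msgraph import GraphServiceClient
    from msgraph.generated.applications.item.add_password.add_password_post_request_body import (
        AddPasswordPostRequestBody,
    )
    from msgraph.generated.models.password_credential import PasswordCredential

    async def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str:
        request_password = AddPasswordPostRequestBody(
            password_credential=PasswordCredential(display_name="WebAppSecret"),
        )
        # POST /applications/{id}/addPassword; Graph generates and returns the secret once.
        result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password)
        return result.secret_text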
scripts.auth_init/create_or_update_application_with_secret
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<5>:<add> object_id = await get_application(graph_client, app_id) <del> object_id = await get_application(auth_headers, app_id) <9>:<add> await graph_client.applications.by_application_id(object_id).patch(request_app) <del> await update_application(auth_headers, object_id, app_payload) <12>:<add> object_id, app_id = await create_application(graph_client, request_app) <del> object_id, app_id = await create_application(auth_headers, app_payload) <18>:<add> client_secret = await add_client_secret(graph_client, object_id) <del> client_secret = await add_client_secret(auth_headers, object_id)
# module: scripts.auth_init def create_or_update_application_with_secret( + graph_client: GraphServiceClient, app_id_env_var: str, app_secret_env_var: str, request_app: Application - auth_headers: Dict[str, str], app_id_env_var: str, app_secret_env_var: str, app_payload: Dict[str, Any] ) -> Tuple[str, str, bool]: <0> app_id = os.getenv(app_id_env_var, "no-id") <1> created_app = False <2> object_id = None <3> if app_id != "no-id": <4> print(f"Checking if application {app_id} exists") <5> object_id = await get_application(auth_headers, app_id) <6> <7> if object_id: <8> print("Application already exists, not creating new one") <9> await update_application(auth_headers, object_id, app_payload) <10> else: <11> print("Creating application registration") <12> object_id, app_id = await create_application(auth_headers, app_payload) <13> update_azd_env(app_id_env_var, app_id) <14> created_app = True <15> <16> if object_id and os.getenv(app_secret_env_var, "no-secret") == "no-secret": <17> print(f"Adding client secret to {app_id}") <18> client_secret = await add_client_secret(auth_headers, object_id) <19> update_azd_env(app_secret_env_var, client_secret) <20> <21> return (object_id, app_id, created_app) <22>
===========changed ref 0=========== # module: scripts.auth_init + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str: - def add_client_secret(auth_headers: Dict[str, str], object_id: str): - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post( - f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", - json={"passwordCredential": {"displayName": "secret"}}, - ) as response: - response_json = await response.json() - if response.status == 200: - return response_json["secretText"] + request_password = AddPasswordPostRequestBody( + password_credential=PasswordCredential(display_name="WebAppSecret"), + ) + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password) + return result.secret_text - raise Exception(response_json) - ===========changed ref 1=========== # module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: - if response.status != 201: - raise Exception(await response.json()) - response_json = await response.json() - object_id = response_json["id"] - client_id = response_json["appId"] + app = await graph_client.applications.post(request_app) + object_id = app.id + client_id = app.app_id - async with session.post( - "https://graph.microsoft.com/v1.0/servicePrincipals", - json={"appId": client_id, "displayName": app_payload["displayName"]}, - ) as response: - if response.status != 201: - raise Exception(await response.json()) - + # Create a service principal + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) + await graph_client.service_principals.post(request_principal) return object_id, client_id ===========changed ref 2=========== # module: scripts.auth_update def main(): if not test_authentication_enabled(): print("Not updating authentication.") exit(0) + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"])) - credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + + scopes = ["https://graph.microsoft.com/.default"] + graph_client = GraphServiceClient(credentials=credential, scopes=scopes) - auth_headers = await get_auth_headers(credential) uri = os.getenv("BACKEND_URI") client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) if client_app_id: + client_object_id = await get_application(graph_client, client_app_id) - client_object_id = await get_application(auth_headers, client_app_id) if client_object_id: print(f"Updating redirect URIs for client app ID {client_app_id}...") # Redirect URIs need to be relative to the deployed application + app = Application( + public_client=PublicClientApplication(redirect_uris=[]), + spa=SpaApplication( - payload = { - "publicClient": {"redirectUris": []}, - "spa": { + redirect_uris=[ - "redirectUris": [ "http://localhost:50505/redirect", "http://localhost:5173/redirect", f"{uri}/redirect", ] + ), - }, + web=WebApplication( - "web": { + redirect_uris=[ - "redirectUris": [ f"{uri}/.auth/login/aad/callback", ] + ), - }, + ) - } + 
await</s> ===========changed ref 3=========== # module: scripts.auth_update def main(): # offset: 1 <s>/aad/callback", ] + ), - }, + ) - } + await graph_client.applications.by_application_id(client_object_id).patch(app) - await update_application(auth_headers, client_object_id, payload) print(f"Application update for client app id {client_app_id} complete.")
scripts.auth_init/main
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<5>:<add> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"])) <del> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) <6>:<add> <add> scopes = ["https://graph.microsoft.com/.default"] <add> graph_client = GraphServiceClient(credentials=credential, scopes=scopes) <del> auth_headers = await get_auth_headers(credential) <10>:<add> graph_client, <del> auth_headers, <13>:<add> request_app=server_app_initial(app_identifier), <del> app_payload=create_server_app_initial_payload(app_identifier), <16>:<add> server_app_permission = server_app_permission_setup(server_app_id) <del> server_app_permission_payload = create_server_app_permission_setup_payload(server_app_id) <17>:<add> await graph_client.applications.by_application_id(server_object_id).patch(server_app_permission) <add> <del> await update_application(auth_headers, object_id=server_object_id, app_payload=server_app_permission_payload) <19>:<add> graph_client, <del> auth_headers, <22>:<add> request_app=client_app(server_app_id, server_app_permission, app_identifier), <del> app_payload=create_client_app_payload(server_app_id, server_app_permission_payload, app_identifier), <24>:<add> <25>:<del> await update_application( <26>:<del> auth_headers, <27>:<del> object_id=server_object_id, <28>:<add> await graph_client.applications.by_application_id(server_object
# module: scripts.auth_init def main(): <0> if not test_authentication_enabled(): <1> print("Not setting up authentication.") <2> exit(0) <3> <4> print("Setting up authentication...") <5> credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) <6> auth_headers = await get_auth_headers(credential) <7> <8> app_identifier = random_app_identifier() <9> server_object_id, server_app_id, _ = await create_or_update_application_with_secret( <10> auth_headers, <11> app_id_env_var="AZURE_SERVER_APP_ID", <12> app_secret_env_var="AZURE_SERVER_APP_SECRET", <13> app_payload=create_server_app_initial_payload(app_identifier), <14> ) <15> print("Setting up server application permissions...") <16> server_app_permission_payload = create_server_app_permission_setup_payload(server_app_id) <17> await update_application(auth_headers, object_id=server_object_id, app_payload=server_app_permission_payload) <18> _, client_app_id, _ = await create_or_update_application_with_secret( <19> auth_headers, <20> app_id_env_var="AZURE_CLIENT_APP_ID", <21> app_secret_env_var="AZURE_CLIENT_APP_SECRET", <22> app_payload=create_client_app_payload(server_app_id, server_app_permission_payload, app_identifier), <23> ) <24> print("Setting up server known client applications...") <25> await update_application( <26> auth_headers, <27> object_id=server_object_id, <28> app_payload=create_server_app_known_client_application_payload(client_app_id), <29> ) <30> print("Authentication setup complete.") <31>
===========changed ref 0=========== # module: scripts.auth_init - def create_server_app_known_client_application_payload(client_app_id: str): - return { - "api": { - "knownClientApplications": [client_app_id], - } - } - ===========changed ref 1=========== # module: scripts.auth_init + def server_app_known_client_application(client_app_id: str) -> Application: + return Application( + api=ApiApplication( + known_client_applications=[client_app_id], + ) + ) + ===========changed ref 2=========== # module: scripts.auth_init - def create_server_app_initial_payload(identifier: int): - return { - "displayName": f"Azure Search OpenAI Chat Server App {identifier}", - "signInAudience": "AzureADMyOrg", - } - ===========changed ref 3=========== # module: scripts.auth_init + def server_app_initial(identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Server App {identifier}", + sign_in_audience="AzureADMyOrg", + ) + ===========changed ref 4=========== # module: scripts.auth_init + def client_app(server_app_id: str, server_app: Application, identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Client App {identifier}", + sign_in_audience="AzureADMyOrg", + web=WebApplication( + redirect_uris=["http://localhost:50505/.auth/login/aad/callback"], + implicit_grant_settings=ImplicitGrantSettings(enable_id_token_issuance=True), + ), + spa=SpaApplication(redirect_uris=["http://localhost:50505/redirect", "http://localhost:5173/redirect"]), + required_resource_access=[ + RequiredResourceAccess( + resource_app_id=server_app_id, + resource_access=[ + ResourceAccess( + id=server_app.api.oauth2_permission_scopes[0].id, + type="Scope", + ) + ], + ), + # Graph User.Read + RequiredResourceAccess( + resource_app_id="00000003-0000-0000-c000-000000000000", + resource_access=[ + ResourceAccess(id="e1fe6dd8-ba31-4d61-89e7-88639da4683d", type="Scope"), + ], + ), + ], + ) + ===========changed ref 5=========== # module: scripts.auth_init + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str: - def add_client_secret(auth_headers: Dict[str, str], object_id: str): - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post( - f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", - json={"passwordCredential": {"displayName": "secret"}}, - ) as response: - response_json = await response.json() - if response.status == 200: - return response_json["secretText"] + request_password = AddPasswordPostRequestBody( + password_credential=PasswordCredential(display_name="WebAppSecret"), + ) + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password) + return result.secret_text - raise Exception(response_json) - ===========changed ref 6=========== # module: scripts.auth_init - def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int): - return { - "displayName": f"Azure Search OpenAI Chat Client App {identifier}", - "signInAudience": "AzureADMyOrg", - "web": { - "redirectUris": ["http://localhost:50505/.auth/login/aad/callback"], - "implicitGrantSettings": {"enableIdTokenIssuance": True}, - }, - "spa": {"redirectUris": ["http://localhost:50505/redirect", "http://localhost:5173/redirect"]}, - "requiredResourceAccess": [ - # access_as_user from server app - { - "resourceAppId": server_app_id, - "resourceAccess": [ - { - "id": 
server_app_permission_setup_payload["api"]["oauth2PermissionScopes"][0]["id"], - "type": "Scope", - } - ], - }, - # Graph User.Read - { - "resourceAppId": "00000003-0000-0000-c000-000000000000", - "resourceAccess": [{"id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d", "type": "Scope"}], - }, - ], - } - ===========changed ref 7=========== # module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: - if response.status != 201: - raise Exception(await response.json()) - response_json = await response.json() - object_id = response_json["id"] - client_id = response_json["appId"] + app = await graph_client.applications.post(request_app) + object_id = app.id + client_id = app.app_id - async with session.post( - "https://graph.microsoft.com/v1.0/servicePrincipals", - json={"appId": client_id, "displayName": app_payload["displayName"]}, - ) as response: - if response.status != 201: - raise Exception(await response.json()) - + # Create a service principal + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) + await graph_client.service_principals.post(request_principal) return object_id, client_id
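For reference, a minimal runnable sketch of the msgraph-sdk flow these auth_init diffs port to: create an application, then its service principal. The client construction and .post() calls are taken from the diff itself; the import paths and display name are assumptions, not shown in the scripts.

# Sketch only: assumes msgraph-sdk and azure-identity are installed and the
# Azure Developer CLI is logged in with permission to create applications.
import asyncio

from azure.identity.aio import AzureDeveloperCliCredential
from msgraph import GraphServiceClient
from msgraph.generated.models.application import Application
from msgraph.generated.models.service_principal import ServicePrincipal


async def main() -> None:
    credential = AzureDeveloperCliCredential()
    graph_client = GraphServiceClient(credentials=credential, scopes=["https://graph.microsoft.com/.default"])
    # POST /applications, as in create_application above
    app = await graph_client.applications.post(
        Application(display_name="Example App", sign_in_audience="AzureADMyOrg")
    )
    # POST /servicePrincipals so the app can sign in and be granted roles
    await graph_client.service_principals.post(
        ServicePrincipal(app_id=app.app_id, display_name=app.display_name)
    )
    print(f"object id: {app.id}, client id: {app.app_id}")


asyncio.run(main())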
app.backend.core.authentication/AuthenticationHelper.validate_access_token
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
# module: app.backend.core.authentication class AuthenticationHelper: def validate_access_token(self, token: str): <0> """ <1> Validate an access token is issued by Entra <2> """ <3> jwks = None <4> async for attempt in AsyncRetrying( <5> retry=retry_if_exception_type(AuthError), <6> wait=wait_random_exponential(min=15, max=60), <7> stop=stop_after_attempt(5), <8> ): <9> with attempt: <10> async with aiohttp.ClientSession() as session: <11> async with session.get(url=self.key_url) as resp: <12> resp_status = resp.status <13> if resp_status in [500, 502, 503, 504]: <14> raise AuthError( <15> error=f"Failed to get keys info: {await resp.text()}", status_code=resp_status <16> ) <17> jwks = await resp.json() <18> <19> if not jwks or "keys" not in jwks: <20> raise AuthError({"code": "invalid_keys", "description": "Unable to get keys to validate auth token."}, 401) <21> <22> rsa_key = None <23> issuer = None <24> audience = None <25> try: <26> unverified_header = jwt.get_unverified_header(token) <27> unverified_claims = jwt.get_unverified_claims(token) <28> issuer = unverified_claims.get("iss") <29> audience = unverified_claims.get("aud") <30> for key in jwks["keys"]: <31> if key["kid"] == unverified_header["kid"]: <32> rsa_key = {"kty": key["kty"], "kid": key["kid"], "use": key["use"], "n": key["n"], "e": key["e"]} <33> break <34> except Exception as exc: <35> raise AuthError( <36> {"code": "invalid_header", "description": "Unable to parse authorization token."}, 401 <37> ) from exc </s>
===========below chunk 0=========== # module: app.backend.core.authentication class AuthenticationHelper: def validate_access_token(self, token: str): # offset: 1 raise AuthError({"code": "invalid_header", "description": "Unable to find appropriate key"}, 401) if issuer not in self.valid_issuers: raise AuthError( {"code": "invalid_header", "description": f"Issuer {issuer} not in {','.join(self.valid_issuers)}"}, 401 ) if audience not in self.valid_audiences: raise AuthError( { "code": "invalid_header", "description": f"Audience {audience} not in {','.join(self.valid_audiences)}", }, 401, ) try: jwt.decode(token, rsa_key, algorithms=["RS256"], audience=audience, issuer=issuer) except jwt.ExpiredSignatureError as jwt_expired_exc: raise AuthError({"code": "token_expired", "description": "token is expired"}, 401) from jwt_expired_exc except jwt.JWTClaimsError as jwt_claims_exc: raise AuthError( {"code": "invalid_claims", "description": "incorrect claims," "please check the audience and issuer"}, 401, ) from jwt_claims_exc except Exception as exc: raise AuthError( {"code": "invalid_header", "description": "Unable to parse authorization token."}, 401 ) from exc ===========changed ref 0=========== # module: scripts.auth_init + def server_app_known_client_application(client_app_id: str) -> Application: + return Application( + api=ApiApplication( + known_client_applications=[client_app_id], + ) + ) + ===========changed ref 1=========== # module: scripts.auth_init - def create_server_app_known_client_application_payload(client_app_id: str): - return { - "api": { - "knownClientApplications": [client_app_id], - } - } - ===========changed ref 2=========== # module: scripts.auth_init + def server_app_initial(identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Server App {identifier}", + sign_in_audience="AzureADMyOrg", + ) + ===========changed ref 3=========== # module: scripts.auth_init - def create_server_app_initial_payload(identifier: int): - return { - "displayName": f"Azure Search OpenAI Chat Server App {identifier}", - "signInAudience": "AzureADMyOrg", - } - ===========changed ref 4=========== # module: scripts.auth_init + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str: - def add_client_secret(auth_headers: Dict[str, str], object_id: str): - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post( - f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", - json={"passwordCredential": {"displayName": "secret"}}, - ) as response: - response_json = await response.json() - if response.status == 200: - return response_json["secretText"] + request_password = AddPasswordPostRequestBody( + password_credential=PasswordCredential(display_name="WebAppSecret"), + ) + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password) + return result.secret_text - raise Exception(response_json) - ===========changed ref 5=========== # module: scripts.auth_init + def client_app(server_app_id: str, server_app: Application, identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Client App {identifier}", + sign_in_audience="AzureADMyOrg", + web=WebApplication( + redirect_uris=["http://localhost:50505/.auth/login/aad/callback"], + implicit_grant_settings=ImplicitGrantSettings(enable_id_token_issuance=True), + ), + 
spa=SpaApplication(redirect_uris=["http://localhost:50505/redirect", "http://localhost:5173/redirect"]), + required_resource_access=[ + RequiredResourceAccess( + resource_app_id=server_app_id, + resource_access=[ + ResourceAccess( + id=server_app.api.oauth2_permission_scopes[0].id, + type="Scope", + ) + ], + ), + # Graph User.Read + RequiredResourceAccess( + resource_app_id="00000003-0000-0000-c000-000000000000", + resource_access=[ + ResourceAccess(id="e1fe6dd8-ba31-4d61-89e7-88639da4683d", type="Scope"), + ], + ), + ], + ) + ===========changed ref 6=========== # module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: - if response.status != 201: - raise Exception(await response.json()) - response_json = await response.json() - object_id = response_json["id"] - client_id = response_json["appId"] + app = await graph_client.applications.post(request_app) + object_id = app.id + client_id = app.app_id - async with session.post( - "https://graph.microsoft.com/v1.0/servicePrincipals", - json={"appId": client_id, "displayName": app_payload["displayName"]}, - ) as response: - if response.status != 201: - raise Exception(await response.json()) - + # Create a service principal + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) + await graph_client.service_principals.post(request_principal) return object_id, client_id
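The JWKS handling in validate_access_token reduces to one step: fetch the published keys and match the token's kid. A minimal sketch of that step, assuming python-jose (jose) and aiohttp; KEY_URL is a placeholder for the tenant-derived key_url the class actually builds.

import aiohttp
from jose import jwt

KEY_URL = "https://login.microsoftonline.com/common/discovery/v2.0/keys"  # placeholder, not from the diff


async def rsa_key_for(token: str) -> dict:
    async with aiohttp.ClientSession() as session:
        async with session.get(KEY_URL) as resp:
            jwks = await resp.json()
    kid = jwt.get_unverified_header(token)["kid"]
    for key in jwks["keys"]:
        if key["kid"] == kid:
            # Only the public-key fields are needed by jwt.decode
            return {"kty": key["kty"], "kid": key["kid"], "use": key["use"], "n": key["n"], "e": key["e"]}
    raise LookupError("no signing key matches the token's kid")

The matched key then feeds jwt.decode(token, rsa_key, algorithms=["RS256"], audience=audience, issuer=issuer), whose ExpiredSignatureError and JWTClaimsError the method maps onto AuthError as shown above.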
scripts.auth_common/get_application
Modified
Azure-Samples~azure-search-openai-demo
a2df481dde933157a5a309220a4de78a9f1463b5
Port to the Graph SDK for authentication scripts (#1510)
<0>:<add> try: <add> app = await graph_client.applications_with_app_id(client_id).get() <add> return app.id <add> except APIError: <add> return None <del> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1>:<del> async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: <2>:<del> if response.status == 200: <3>:<del> response_json = await response.json() <4>:<del> return response_json["id"] <6>:<del> return None <7>:<del>
# module: scripts.auth_common + def get_application(graph_client: GraphServiceClient, client_id: str) -> Optional[str]: - def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: <0> async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: <1> async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: <2> if response.status == 200: <3> response_json = await response.json() <4> return response_json["id"] <5> <6> return None <7>
===========unchanged ref 0=========== at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: scripts.auth_common - TIMEOUT = 60 - ===========changed ref 1=========== # module: scripts.auth_common - def get_auth_headers(credential: AsyncTokenCredential): - token_result = await credential.get_token("https://graph.microsoft.com/.default") - return {"Authorization": f"Bearer {token_result.token}"} - ===========changed ref 2=========== # module: scripts.auth_init + def server_app_known_client_application(client_app_id: str) -> Application: + return Application( + api=ApiApplication( + known_client_applications=[client_app_id], + ) + ) + ===========changed ref 3=========== # module: scripts.auth_init - def create_server_app_known_client_application_payload(client_app_id: str): - return { - "api": { - "knownClientApplications": [client_app_id], - } - } - ===========changed ref 4=========== # module: scripts.auth_init + def server_app_initial(identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Server App {identifier}", + sign_in_audience="AzureADMyOrg", + ) + ===========changed ref 5=========== # module: scripts.auth_init - def create_server_app_initial_payload(identifier: int): - return { - "displayName": f"Azure Search OpenAI Chat Server App {identifier}", - "signInAudience": "AzureADMyOrg", - } - ===========changed ref 6=========== # module: scripts.auth_init + def add_client_secret(graph_client: GraphServiceClient, app_id: str) -> str: - def add_client_secret(auth_headers: Dict[str, str], object_id: str): - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post( - f"https://graph.microsoft.com/v1.0/applications/{object_id}/addPassword", - json={"passwordCredential": {"displayName": "secret"}}, - ) as response: - response_json = await response.json() - if response.status == 200: - return response_json["secretText"] + request_password = AddPasswordPostRequestBody( + password_credential=PasswordCredential(display_name="WebAppSecret"), + ) + result = await graph_client.applications.by_application_id(app_id).add_password.post(request_password) + return result.secret_text - raise Exception(response_json) - ===========changed ref 7=========== # module: scripts.auth_init + def client_app(server_app_id: str, server_app: Application, identifier: int) -> Application: + return Application( + display_name=f"Azure Search OpenAI Chat Client App {identifier}", + sign_in_audience="AzureADMyOrg", + web=WebApplication( + redirect_uris=["http://localhost:50505/.auth/login/aad/callback"], + implicit_grant_settings=ImplicitGrantSettings(enable_id_token_issuance=True), + ), + spa=SpaApplication(redirect_uris=["http://localhost:50505/redirect", "http://localhost:5173/redirect"]), + required_resource_access=[ + RequiredResourceAccess( + resource_app_id=server_app_id, + resource_access=[ + ResourceAccess( + id=server_app.api.oauth2_permission_scopes[0].id, + type="Scope", + ) + ], + ), + # Graph User.Read + RequiredResourceAccess( + resource_app_id="00000003-0000-0000-c000-000000000000", + resource_access=[ + ResourceAccess(id="e1fe6dd8-ba31-4d61-89e7-88639da4683d", type="Scope"), + ], + ), + ], + ) + ===========changed ref 8=========== # module: scripts.auth_init + def create_application(graph_client: GraphServiceClient, request_app: Application) -> Tuple[str, str]: - def create_application(auth_headers: 
Dict[str, str], app_payload: Dict[str, Any]) -> Tuple[str, str]: - async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: - async with session.post("https://graph.microsoft.com/v1.0/applications", json=app_payload) as response: - if response.status != 201: - raise Exception(await response.json()) - response_json = await response.json() - object_id = response_json["id"] - client_id = response_json["appId"] + app = await graph_client.applications.post(request_app) + object_id = app.id + client_id = app.app_id - async with session.post( - "https://graph.microsoft.com/v1.0/servicePrincipals", - json={"appId": client_id, "displayName": app_payload["displayName"]}, - ) as response: - if response.status != 201: - raise Exception(await response.json()) - + # Create a service principal + request_principal = ServicePrincipal(app_id=client_id, display_name=app.display_name) + await graph_client.service_principals.post(request_principal) return object_id, client_id ===========changed ref 9=========== # module: scripts.auth_init - def create_client_app_payload(server_app_id: str, server_app_permission_setup_payload: Dict[str, Any], identifier: int): - return { - "displayName": f"Azure Search OpenAI Chat Client App {identifier}", - "signInAudience": "AzureADMyOrg", - "web": { - "redirectUris": ["http://localhost:50505/.auth/login/aad/callback"], - "implicitGrantSettings": {"enableIdTokenIssuance": True}, - }, - "spa": {"redirectUris": ["http://localhost:50505/redirect", "http://localhost:5173/redirect"]}, - "requiredResourceAccess": [ - # access_as_user from server app - { - "resourceAppId": server_app_id, - "resourceAccess": [ - { - "id": server_app_permission_setup_payload["api"]["oauth2PermissionScopes"][0]["id"], - "type": "Scope", - } - ], - }, - # Graph User.Read - { - "resourceAppId": "00000003-0000-0000-c000-000000000000", - "resourceAccess": [{"id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d", "type": "Scope"}], - }, - ], - } -
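The behavioral change in get_application is that the SDK raises on a missing application instead of returning a non-200 status, so the status check becomes a try/except. A sketch with the imports the diff does not show (the APIError path is an assumption based on the kiota package layout):

from typing import Optional

from kiota_abstractions.api_error import APIError  # assumed import path
from msgraph import GraphServiceClient


async def get_object_id(graph_client: GraphServiceClient, client_id: str) -> Optional[str]:
    try:
        # GET /applications(appId='{client_id}'); a 404 surfaces as APIError
        app = await graph_client.applications_with_app_id(client_id).get()
        return app.id
    except APIError:
        return None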
tests.test_upload/test_upload_file
Modified
Azure-Samples~azure-search-openai-demo
0124725ad85726aa3f62450e16fb136a63154f69
Adds storageURL field to track file location (#1535)
<22>:<del> path = kwargs.get("file") <23>:<del> if path in self.files: <24>:<del> return self.files[path] <25>:<del> self.files[path] = DataLakeFileClient(path) <26>:<del> return self.files[path] <27>:<add> return azure.storage.filedatalake.aio.DataLakeFileClient( <add> account_url="https://test.blob.core.windows.net/", file_system_name="user-content", file_path=args[0] <add> )
# module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): <0> async def mock_get_directory_properties(self, *args, **kwargs): <1> if directory_exists: <2> return None <3> else: <4> raise azure.core.exceptions.ResourceNotFoundError() <5> <6> monkeypatch.setattr(DataLakeDirectoryClient, "get_directory_properties", mock_get_directory_properties) <7> <8> directory_created = [False] <9> <10> async def mock_create_directory(self, *args, **kwargs): <11> directory_created[0] = True <12> <13> monkeypatch.setattr(DataLakeDirectoryClient, "create_directory", mock_create_directory) <14> <15> async def mock_directory_set_access_control(self, *args, **kwargs): <16> assert kwargs.get("owner") == "OID_X" <17> return None <18> <19> monkeypatch.setattr(DataLakeDirectoryClient, "set_access_control", mock_directory_set_access_control) <20> <21> def mock_directory_get_file_client(self, *args, **kwargs): <22> path = kwargs.get("file") <23> if path in self.files: <24> return self.files[path] <25> self.files[path] = DataLakeFileClient(path) <26> return self.files[path] <27> <28> monkeypatch.setattr(DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client) <29> <30> async def mock_upload_file(self, *args, **kwargs): <31> assert kwargs.get("overwrite") is True <32> assert kwargs.get("metadata") == {"UploadedBy": "OID_X"} <33> return None <34> <35> monkeypatch.setattr(DataLakeFileClient, "upload_data",</s>
===========below chunk 0=========== # module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): # offset: 1 async def mock_create_client(self, *args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return MockClient( embeddings_client=MockEmbeddingsClient( create_embedding_response=CreateEmbeddingResponse( object="list", data=[ Embedding( embedding=[ 0.0023064255, -0.009327292, -0.0028842222, ], index=0, object="embedding", ) ], model="text-embedding-ada-002", usage=Usage(prompt_tokens=8, total_tokens=8), ) ) ) documents_uploaded = [] async def mock_upload_documents(self, documents): documents_uploaded.extend(documents) monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents) monkeypatch.setattr(AzureOpenAIEmbeddingService, "create_client", mock_create_client) response = await auth_client.post( "/upload", headers={"Authorization": "Bearer test"}, files={"file": FileStorage(BytesIO(b"foo;bar"), filename="a.txt")}, ) message = (await response.get_json())["message"] assert message == "File uploaded successfully" assert response.status_code == 200 assert len(documents_uploaded) == 1 assert documents_uploaded[0]["id"] == "file-a_txt-612E7478747B276F696473273A205B274F49445F58275D7D-</s> ===========below chunk 1=========== # module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): # offset: 2 <s>E7478747B276F696473273A205B274F49445F58275D7D-page-0" assert documents_uploaded[0]["sourcepage"] == "a.txt" assert documents_uploaded[0]["sourcefile"] == "a.txt" assert documents_uploaded[0]["embedding"] == [0.0023064255, -0.009327292, -0.0028842222] assert documents_uploaded[0]["category"] is None assert documents_uploaded[0]["oids"] == ["OID_X"] assert directory_created[0] == (not directory_exists) ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: io BytesIO(initial_bytes: bytes=...) at: tests.mocks MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse) MockClient(embeddings_client) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
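These tests lean on one pytest idiom throughout: monkeypatching an async SDK method at class level so every instance constructed inside the app picks up the fake. A generic, self-contained sketch of that idiom (Widget and fetch are hypothetical names; requires pytest and pytest-asyncio):

import pytest


class Widget:
    async def fetch(self) -> str:
        raise RuntimeError("would hit the network")


@pytest.mark.asyncio
async def test_fetch_is_stubbed(monkeypatch):
    async def fake_fetch(self) -> str:
        return "stubbed"

    # Patch the class, not an instance, so code under test that constructs
    # its own Widget still gets the fake; monkeypatch undoes this afterwards.
    monkeypatch.setattr(Widget, "fetch", fake_fetch)
    assert await Widget().fetch() == "stubbed"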
tests.test_upload/test_delete_uploaded
Modified
Azure-Samples~azure-search-openai-demo
0124725ad85726aa3f62450e16fb136a63154f69
Adds storageURL field to track file location (#1535)
<4>:<add> <add> def mock_directory_get_file_client(self, *args, **kwargs): <add> return azure.storage.filedatalake.aio.DataLakeFileClient( <add> account_url="https://test.blob.core.windows.net/", file_system_name="user-content", file_path=args[0] <add> ) <add> <add> monkeypatch.setattr(DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client)
# module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): <0> async def mock_delete_file(self): <1> return None <2> <3> monkeypatch.setattr(DataLakeFileClient, "delete_file", mock_delete_file) <4> <5> class AsyncSearchResultsIterator: <6> def __init__(self): <7> self.results = [ <8> { <9> "sourcepage": "a.txt", <10> "sourcefile": "a.txt", <11> "content": "This is a test document.", <12> "embedding": [], <13> "category": None, <14> "id": "file-a_txt-7465737420646F63756D656E742E706466", <15> "oids": ["OID_X"], <16> "@search.score": 0.03279569745063782, <17> "@search.reranker_score": 3.4577205181121826, <18> }, <19> { <20> "sourcepage": "a.txt", <21> "sourcefile": "a.txt", <22> "content": "This is a test document.", <23> "embedding": [], <24> "category": None, <25> "id": "file-a_txt-7465737420646F63756D656E742E706422", <26> "oids": [], <27> "@search.score": 0.03279569745063782, <28> "@search.reranker_score": 3.4577205181121826, <29> }, <30> { <31> "sourcepage": "a.txt", <32> "sourcefile": "a.txt", <33> "content": "This is a test document.", <34> "embedding": [], <35> "category": None, <36> "id": "file-a_txt-7465737420646F63756D656E742E</s>
===========below chunk 0=========== # module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): # offset: 1 "oids": ["OID_X", "OID_Y"], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, }, ] def __aiter__(self): return self async def __anext__(self): if len(self.results) == 0: raise StopAsyncIteration return self.results.pop() async def get_count(self): return len(self.results) search_results = AsyncSearchResultsIterator() searched_filters = [] async def mock_search(self, *args, **kwargs): self.filter = kwargs.get("filter") searched_filters.append(self.filter) return search_results monkeypatch.setattr(SearchClient, "search", mock_search) deleted_documents = [] async def mock_delete_documents(self, documents): deleted_documents.extend(documents) return documents monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) response = await auth_client.post( "/delete_uploaded", headers={"Authorization": "Bearer test"}, json={"filename": "a.txt"} ) assert response.status_code == 200 assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)" assert searched_filters[0] == "sourcefile eq 'a.txt'" assert len(deleted_documents) == 1, "It should have only deleted the document solely owned by OID_X" assert deleted_documents[0]["id"] == "file-a_txt-7465737420646F63756D656E742E706466" ===========unchanged ref 0=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): async def mock_get_directory_properties(self, *args, **kwargs): if directory_exists: return None else: raise azure.core.exceptions.ResourceNotFoundError() monkeypatch.setattr(DataLakeDirectoryClient, "get_directory_properties", mock_get_directory_properties) directory_created = [False] async def mock_create_directory(self, *args, **kwargs): directory_created[0] = True monkeypatch.setattr(DataLakeDirectoryClient, "create_directory", mock_create_directory) async def mock_directory_set_access_control(self, *args, **kwargs): assert kwargs.get("owner") == "OID_X" return None monkeypatch.setattr(DataLakeDirectoryClient, "set_access_control", mock_directory_set_access_control) def mock_directory_get_file_client(self, *args, **kwargs): - path = kwargs.get("file") - if path in self.files: - return self.files[path] - self.files[path] = DataLakeFileClient(path) - return self.files[path] + return azure.storage.filedatalake.aio.DataLakeFileClient( + account_url="https://test.blob.core.windows.net/", file_system_name="user-content", file_path=args[0] + ) monkeypatch.setattr(DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client) async def mock_upload_file(self, *args, **kwargs): assert kwargs.get("overwrite") is True assert kwargs.get("metadata") == {"Uploaded</s> ===========changed ref 1=========== # module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): # offset: 1 <s>, *args, **kwargs): assert 
kwargs.get("overwrite") is True assert kwargs.get("metadata") == {"UploadedBy": "OID_X"} return None monkeypatch.setattr(DataLakeFileClient, "upload_data", mock_upload_file) async def mock_create_client(self, *args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return MockClient( embeddings_client=MockEmbeddingsClient( create_embedding_response=CreateEmbeddingResponse( object="list", data=[ Embedding( embedding=[ 0.0023064255, -0.009327292, -0.0028842222, ], index=0, object="embedding", ) ], model="text-embedding-ada-002", usage=Usage(prompt_tokens=8, total_tokens=8), ) ) ) documents_uploaded = [] async def mock_upload_documents(self, documents): documents_uploaded.extend(documents) monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents) monkeypatch.setattr(AzureOpenAIEmbeddingService, "create_client", mock_create_client) response = await auth_client.post( "/upload", headers={"Authorization": "Bearer test"}, files={"file": FileStorage(BytesIO(b"foo;bar"), filename="a.txt")</s> ===========changed ref 2=========== # module: tests.test_upload # parameterize for directory existing or not @pytest.mark.asyncio @pytest.mark.parametrize("directory_exists", [True, False]) async def test_upload_file(auth_client, monkeypatch, mock_data_lake_service_client, directory_exists): # offset: 2 <s> ) message = (await response.get_json())["message"] assert message == "File uploaded successfully" assert response.status_code == 200 assert len(documents_uploaded) == 1 assert documents_uploaded[0]["id"] == "file-a_txt-612E7478747B276F696473273A205B274F49445F58275D7D-page-0" assert documents_uploaded[0]["sourcepage"] == "a.txt" assert documents_uploaded[0]["sourcefile"] == "a.txt" assert documents_uploaded[0]["embedding"] == [0.0023064255, -0.009327292, -0.0028842222] assert documents_uploaded[0]["category"] is None assert documents_uploaded[0]["oids"] == ["OID_X"] assert directory_created[0] == (not directory_exists)
tests.test_blob_manager/test_upload_and_remove
Modified
Azure-Samples~azure-search-openai-demo
0124725ad85726aa3f62450e16fb136a63154f69
Adds storageURL field to track file location (#1535)
<12>:<add> return azure.storage.blob.aio.BlobClient.from_blob_url( <add> "https://test.blob.core.windows.net/test/test.pdf", credential=MockAzureCredential() <add> ) <del> return True <17>:<add> assert f.url == "https://test.blob.core.windows.net/test/test.pdf"
# module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): <0> with NamedTemporaryFile(suffix=".pdf") as temp_file: <1> f = File(temp_file.file) <2> filename = os.path.basename(f.content.name) <3> <4> # Set up mocks used by upload_blob <5> async def mock_exists(*args, **kwargs): <6> return True <7> <8> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) <9> <10> async def mock_upload_blob(self, name, *args, **kwargs): <11> assert name == filename <12> return True <13> <14> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) <15> <16> await blob_manager.upload_blob(f) <17> <18> # Set up mocks used by remove_blob <19> def mock_list_blob_names(*args, **kwargs): <20> assert kwargs.get("name_starts_with") == filename.split(".pdf")[0] <21> <22> class AsyncBlobItemsIterator: <23> def __init__(self, file): <24> self.files = [file, "dontdelete.pdf"] <25> <26> def __aiter__(self): <27> return self <28> <29> async def __anext__(self): <30> if self.files: <31> return self.files.pop() <32> raise StopAsyncIteration <33> <34> return AsyncBlobItemsIterator(filename) <35> <36> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) <37> <38> async def mock_delete_blob(self, name, *args, **kwargs): <39> assert name == filename <40> return True <41> <42> mon</s>
===========below chunk 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): # offset: 1 await blob_manager.remove_blob(f.content.name) ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: sys version_info: _version_info at: sys._version_info major: int minor: int micro: int releaselevel: str serial: int ===========unchanged ref 1=========== at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
-> IO[bytes] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): async def mock_delete_file(self): return None monkeypatch.setattr(DataLakeFileClient, "delete_file", mock_delete_file) + + def mock_directory_get_file_client(self, *args, **kwargs): + return azure.storage.filedatalake.aio.DataLakeFileClient( + account_url="https://test.blob.core.windows.net/", file_system_name="user-content", file_path=args[0] + ) + + monkeypatch.setattr(DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client) class AsyncSearchResultsIterator: def __init__(self): self.results = [ { "sourcepage": "a.txt", "sourcefile": "a.txt", "content": "This is a test document.", "embedding": [], "category": None, "id": "file-a_txt-7465737420646F63756D656E742E706466", "oids": ["OID_X"], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, }, { "sourcepage": "a.txt", "sourcefile": "a.txt", "content": "This is a test document.", "embedding": [], "category": None, "id": "file-a_txt-7465737420646F63756D656E742E706422", "oids": [], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, </s> ===========changed ref 1=========== # module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): # offset: 1 <s>9745063782, "@search.reranker_score": 3.4577205181121826, }, { "sourcepage": "a.txt", "sourcefile": "a.txt", "content": "This is a test document.", "embedding": [], "category": None, "id": "file-a_txt-7465737420646F63756D656E742E706433", "oids": ["OID_X", "OID_Y"], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, }, ] def __aiter__(self): return self async def __anext__(self): if len(self.results) == 0: raise StopAsyncIteration return self.results.pop() async def get_count(self): return len(self.results) search_results = AsyncSearchResultsIterator() searched_filters = [] async def mock_search(self, *args, **kwargs): self.filter = kwargs.get("filter") searched_filters.append(self.filter) return search_results monkeypatch.setattr(SearchClient, "search", mock_search) deleted_documents = [] async def mock_delete_documents(self, documents): deleted_documents.extend(documents) return documents monkeypatch.setattr(SearchClient, "delete_documents", mock_delete_documents) response = await auth_client.post( "/delete_uploaded", headers={"Authorization</s> ===========changed ref 2=========== # module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): # offset: 2 <s>Bearer test"}, json={"filename": "a.txt"} ) assert response.status_code == 200 assert len(searched_filters) == 2, "It should have searched twice (with no results on second try)" assert searched_filters[0] == "sourcefile eq 'a.txt'" assert len(deleted_documents) == 1, "It should have only deleted the document solely owned by OID_X" assert deleted_documents[0]["id"] == "file-a_txt-7465737420646F63756D656E742E706466"
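The notable change in these blob-manager tests is that mock_upload_blob now returns a real BlobClient built via from_blob_url instead of True, consistent with upload_blob reading the returned client's url to populate the new storageURL field (an inference from the added assertion on f.url, not stated in the diff). A sketch, assuming azure-storage-blob; the URL is a dummy test value:

from azure.storage.blob.aio import BlobClient

client = BlobClient.from_blob_url("https://test.blob.core.windows.net/test/test.pdf")
print(client.url)  # https://test.blob.core.windows.net/test/test.pdf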
tests.test_blob_manager/test_upload_and_remove_all
Modified
Azure-Samples~azure-search-openai-demo
0124725ad85726aa3f62450e16fb136a63154f69
Adds storageURL field to track file location (#1535)
<12>:<add> return azure.storage.blob.aio.BlobClient.from_blob_url( <add> "https://test.blob.core.windows.net/test/test.pdf", credential=MockAzureCredential() <add> ) <del> return True <17>:<add> assert f.url == "https://test.blob.core.windows.net/test/test.pdf"
# module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): <0> with NamedTemporaryFile(suffix=".pdf") as temp_file: <1> f = File(temp_file.file) <2> filename = os.path.basename(f.content.name) <3> <4> # Set up mocks used by upload_blob <5> async def mock_exists(*args, **kwargs): <6> return True <7> <8> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) <9> <10> async def mock_upload_blob(self, name, *args, **kwargs): <11> assert name == filename <12> return True <13> <14> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) <15> <16> await blob_manager.upload_blob(f) <17> <18> # Set up mocks used by remove_blob <19> def mock_list_blob_names(*args, **kwargs): <20> assert kwargs.get("name_starts_with") is None <21> <22> class AsyncBlobItemsIterator: <23> def __init__(self, file): <24> self.files = [file] <25> <26> def __aiter__(self): <27> return self <28> <29> async def __anext__(self): <30> if self.files: <31> return self.files.pop() <32> raise StopAsyncIteration <33> <34> return AsyncBlobItemsIterator(filename) <35> <36> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) <37> <38> async def mock_delete_blob(self, name, *args, **kwargs): <39> assert name == filename <40> return True <41> <42> monkeypatch.setattr("azure.storage.blob.aio</s>
===========below chunk 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): # offset: 1 await blob_manager.remove_blob() ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skipif: _SkipifMarkDecorator at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: sys version_info: _version_info at: sys._version_info minor: int at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[bytes] at: tests.test_blob_manager.test_upload_and_remove f = File(temp_file.file) ===========unchanged ref 1=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): with NamedTemporaryFile(suffix=".pdf") as temp_file: f = File(temp_file.file) filename = os.path.basename(f.content.name) # Set up mocks used by upload_blob async def mock_exists(*args, **kwargs): return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) async def mock_upload_blob(self, name, *args, **kwargs): assert name == filename + return azure.storage.blob.aio.BlobClient.from_blob_url( + "https://test.blob.core.windows.net/test/test.pdf", credential=MockAzureCredential() + ) - return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) await blob_manager.upload_blob(f) + assert f.url == "https://test.blob.core.windows.net/test/test.pdf" # Set up mocks used by remove_blob def mock_list_blob_names(*args, **kwargs): assert kwargs.get("name_starts_with") == filename.split(".pdf")[0] class AsyncBlobItemsIterator: def __init__(self, file): self.files = [file, "dontdelete.pdf"] def __aiter__(self): return self async def __anext__(self): if self.files: return self.files.pop() raise StopAsyncIteration return AsyncBlobItemsIterator(filename) monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list</s> ===========changed ref 1=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): # 
offset: 1 <s> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) async def mock_delete_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) await blob_manager.remove_blob(f.content.name) ===========changed ref 2=========== # module: tests.test_upload @pytest.mark.asyncio async def test_delete_uploaded(auth_client, monkeypatch, mock_data_lake_service_client): async def mock_delete_file(self): return None monkeypatch.setattr(DataLakeFileClient, "delete_file", mock_delete_file) + + def mock_directory_get_file_client(self, *args, **kwargs): + return azure.storage.filedatalake.aio.DataLakeFileClient( + account_url="https://test.blob.core.windows.net/", file_system_name="user-content", file_path=args[0] + ) + + monkeypatch.setattr(DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client) class AsyncSearchResultsIterator: def __init__(self): self.results = [ { "sourcepage": "a.txt", "sourcefile": "a.txt", "content": "This is a test document.", "embedding": [], "category": None, "id": "file-a_txt-7465737420646F63756D656E742E706466", "oids": ["OID_X"], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, }, { "sourcepage": "a.txt", "sourcefile": "a.txt", "content": "This is a test document.", "embedding": [], "category": None, "id": "file-a_txt-7465737420646F63756D656E742E706422", "oids": [], "@search.score": 0.03279569745063782, "@search.reranker_score": 3.4577205181121826, </s>
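Taken together, the two blob-manager tests pin down the deletion contract: remove_blob(path) lists blob names with name_starts_with set to the filename stem, while the remove-all variant passes no prefix. A sketch of that contract, assuming azure-storage-blob:

from typing import Optional

from azure.storage.blob.aio import ContainerClient


async def remove_blobs(container: ContainerClient, prefix: Optional[str] = None) -> None:
    # prefix=None lists every blob; a stem like "test" scopes the deletion
    async for name in container.list_blob_names(name_starts_with=prefix):
        await container.delete_blob(name)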