Dataset schema (8 string columns, one row per edited code unit):

  column          dtype   values
  path            string  lengths 9-117
  type            string  2 distinct values
  project         string  10 distinct values
  commit_hash     string  length 40 (full commit SHA)
  commit_message  string  lengths 1-137
  ground_truth    string  lengths 0-2.74k
  main_code       string  lengths 102-3.37k
  context         string  lengths 0-14.7k
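Each row pairs a focal code unit (main_code, whose statements carry inline "<N>" line markers) with the edit a commit applied to it (ground_truth) and supporting repository context. A minimal sketch of loading and inspecting rows with the Hugging Face datasets library follows; the dataset id is a hypothetical placeholder, since this dump does not name one.

# Minimal sketch, assuming the rows below are published as a Hugging Face
# dataset; "your-org/commit-edit-corpus" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("your-org/commit-edit-corpus", split="train")

row = ds[0]
print(row["path"])            # e.g. scripts.prepdocslib.searchmanager/SearchManager.create_index
print(row["commit_message"])  # e.g. [Backend/Prepdocs] Update to latest version of search SDK (#1010)
print(row["ground_truth"])    # line-indexed <add>/<del> edits against main_code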

path: scripts.prepdocslib.searchmanager/SearchManager.create_index
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 13e4cd8476c42158e708662539ce101b5b6a9655
commit_message: [Backend/Prepdocs] Update to latest version of search SDK (#1010)
ground_truth:
<16>:<add> vector_search_profile="embedding_config", <del> vector_search_configuration="default",
# module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): <0> if self.search_info.verbose: <1> print(f"Ensuring search index {self.search_info.index_name} exists") <2> <3> async with self.search_info.create_search_index_client() as search_index_client: <4> fields = [ <5> SimpleField(name="id", type="Edm.String", key=True), <6> SearchableField(name="content", type="Edm.String", analyzer_name=self.search_analyzer_name), <7> SearchField( <8> name="embedding", <9> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <10> hidden=False, <11> searchable=True, <12> filterable=False, <13> sortable=False, <14> facetable=False, <15> vector_search_dimensions=1536, <16> vector_search_configuration="default", <17> ), <18> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <19> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), <20> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), <21> ] <22> if self.use_acls: <23> fields.append( <24> SimpleField( <25> name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True <26> ) <27> ) <28> fields.append( <29> SimpleField( <30> name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True <31> ) <32> ) <33> <34> index = SearchIndex( <35> name=self.search_info.index_name, <36> fields=fields, <37> semantic_settings=SemanticSettings( <38> configurations=[ <39> SemanticConfiguration( <40> name="default", <41> prioritized_fields=PrioritizedFields( <42> title_field</s>
===========below chunk 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): # offset: 1 ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: if self.search_info.verbose: print(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: if self.search_info.verbose: print(f"Search index {self.search_info.index_name} already exists") ===========unchanged ref 0=========== at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls at: scripts.prepdocslib.strategy.SearchInfo create_search_index_client() -> SearchIndexClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name self.verbose = verbose
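The ground_truth field uses a compact, line-indexed edit notation: each "<N>:" entry targets the statement tagged "<N>" inside main_code, with "<add>" giving text to insert and "<del>" text to remove. Below is a sketch that parses both halves of that notation; the semantics are inferred from the rows in this dump, not from a published spec.

import re

def split_numbered_code(main_code: str) -> list[str]:
    """Split main_code on its inline <N> markers into an ordered list of statements."""
    parts = re.split(r"<(\d+)>", main_code)
    # parts = [preamble, "0", stmt0, "1", stmt1, ...]; keep every tagged statement.
    return [parts[i + 1].strip() for i in range(1, len(parts) - 1, 2)]

def parse_ground_truth(gt: str) -> dict[int, list[tuple[str, str]]]:
    """Map each target line index to its ordered ('add' | 'del', text) operations."""
    edits: dict[int, list[tuple[str, str]]] = {}
    for lineno, body in re.findall(r"<(\d+)>:((?:(?!<\d+>:).)*)", gt, flags=re.S):
        ops = re.findall(r"<(add|del)>\s*((?:(?!<(?:add|del)>).)*)", body, flags=re.S)
        edits[int(lineno)] = [(op, text.strip()) for op, text in ops]
    return edits

# Checked against the first row above:
gt = '<16>:<add> vector_search_profile="embedding_config", <del> vector_search_configuration="default",'
assert parse_ground_truth(gt) == {
    16: [
        ("add", 'vector_search_profile="embedding_config",'),
        ("del", 'vector_search_configuration="default",'),
    ]
}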

path: app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: 13e4cd8476c42158e708662539ce101b5b6a9655
commit_message: [Backend/Prepdocs] Update to latest version of search SDK (#1010)
ground_truth:
<10>:<add> vectors: list[VectorQuery] = [] <14>:<del> else: <15>:<del> query_vector = None <16>:<add> vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding"))
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <6> top = overrides.get("top", 3) <7> filter = self.build_filter(overrides, auth_claims) <8> <9> # If retrieval mode includes vectors, compute an embedding for the query <10> if has_vector: <11> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <12> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <13> query_vector = embedding["data"][0]["embedding"] <14> else: <15> query_vector = None <16> <17> # Only keep the text query if the retrieval mode uses text, otherwise drop it <18> query_text = q if has_text else "" <19> <20> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <21> if overrides.get("semantic_ranker") and has_text: <22> r = await self.search_client.search( <23> query_text, <24> filter=filter, <25> query_type=QueryType.SEMANTIC, <26> query_language=self.query_</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 query_speller=self.query_speller, semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.insert_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.insert_message("assistant", self.answer) message_builder.insert_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["context"] = extra_info chat_completion.choices[0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. 
Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder insert_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params)

path: scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_batch
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<2>:<add> client = await self.create_client() <4>:<add> retry=retry_if_exception_type(RateLimitError), <del> retry=retry_if_exception_type(openai.error.RateLimitError), <10>:<del> emb_args = await self.create_embedding_arguments() <11>:<add> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) <del> emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) <12>:<add> embeddings.extend([data.embedding for data in emb_response.data]) <del> embeddings.extend([data["embedding"] for data in emb_response["data"]])
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: <0> batches = self.split_text_into_batches(texts) <1> embeddings = [] <2> for batch in batches: <3> async for attempt in AsyncRetrying( <4> retry=retry_if_exception_type(openai.error.RateLimitError), <5> wait=wait_random_exponential(min=15, max=60), <6> stop=stop_after_attempt(15), <7> before_sleep=self.before_retry_sleep, <8> ): <9> with attempt: <10> emb_args = await self.create_embedding_arguments() <11> emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) <12> embeddings.extend([data["embedding"] for data in emb_response["data"]]) <13> if self.verbose: <14> print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") <15> <16> return embeddings <17>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError -
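This record and the next port the same tenacity retry idiom from openai.error.RateLimitError to the v1 RateLimitError with an explicit async client. A self-contained sketch of that idiom follows; the model name is a placeholder and the backoff values mirror the records.

from openai import AsyncOpenAI, RateLimitError
from tenacity import (
    AsyncRetrying,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)

async def embed_with_retry(client: AsyncOpenAI, text: str) -> list[float]:
    # Retry only on rate limits, with jittered exponential backoff, up to 15 tries.
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(RateLimitError),
        wait=wait_random_exponential(min=15, max=60),
        stop=stop_after_attempt(15),
    ):
        with attempt:
            response = await client.embeddings.create(model="text-embedding-ada-002", input=text)
            return response.data[0].embedding
    raise AssertionError("unreachable: tenacity raises RetryError once attempts are exhausted")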

path: scripts.prepdocslib.embeddings/OpenAIEmbeddings.create_embedding_single
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<0>:<add> client = await self.create_client() <1>:<add> retry=retry_if_exception_type(RateLimitError), <del> retry=retry_if_exception_type(openai.error.RateLimitError), <7>:<del> emb_args = await self.create_embedding_arguments() <8>:<add> emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) <del> emb_response = await openai.Embedding.acreate(**emb_args, input=text) <10>:<add> return emb_response.data[0].embedding <del> return emb_response["data"][0]["embedding"]
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: <0> async for attempt in AsyncRetrying( <1> retry=retry_if_exception_type(openai.error.RateLimitError), <2> wait=wait_random_exponential(min=15, max=60), <3> stop=stop_after_attempt(15), <4> before_sleep=self.before_retry_sleep, <5> ): <6> with attempt: <7> emb_args = await self.create_embedding_arguments() <8> emb_response = await openai.Embedding.acreate(**emb_args, input=text) <9> <10> return emb_response["data"][0]["embedding"] <11>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] + client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) - emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) + embeddings.extend([data.embedding for data in emb_response.data]) - embeddings.extend([data["embedding"] for data in emb_response["data"]]) if self.verbose: print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings

path: scripts.prepdocslib.embeddings/AzureOpenAIEmbeddingService.wrap_credential
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<9>:<add> raise TypeError("Invalid credential type") <del> raise Exception("Invalid credential type")
# module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: <0> if isinstance(self.credential, AzureKeyCredential): <1> return self.credential.key <2> <3> if isinstance(self.credential, AsyncTokenCredential): <4> if not self.cached_token or self.cached_token.expires_on <= time.time(): <5> self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") <6> <7> return self.cached_token.token <8> <9> raise Exception("Invalid credential type") <10>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"] ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] + client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) - emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) + embeddings.extend([data.embedding for data in emb_response.data]) - embeddings.extend([data["embedding"] for data in emb_response["data"]]) if self.verbose: print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
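The wrap_credential record above resolves either a static API key or a cached Azure AD token that is refreshed once it expires. The same resolution logic in isolation, as a sketch; the token scope is taken from the record.

import time

from azure.core.credentials import AzureKeyCredential
from azure.core.credentials_async import AsyncTokenCredential

async def resolve_api_key(credential, cached_token=None):
    # Static keys pass through; AAD tokens are cached until expiry, then refreshed.
    if isinstance(credential, AzureKeyCredential):
        return credential.key, cached_token
    if isinstance(credential, AsyncTokenCredential):
        if cached_token is None or cached_token.expires_on <= time.time():
            cached_token = await credential.get_token("https://cognitiveservices.azure.com/.default")
        return cached_token.token, cached_token
    raise TypeError("Invalid credential type")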

path: app.backend.core.messagebuilder/MessageBuilder.__init__
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<0>:<add> self.messages: list[ChatCompletionMessageParam] = [ <add> ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) <del> self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] <1>:<add> ]
# module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): <0> self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] <1> self.model = chatgpt_model <2>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"] ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def 
create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] + client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) - emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) + embeddings.extend([data.embedding for data in emb_response.data]) - embeddings.extend([data["embedding"] for data in emb_response["data"]]) if self.verbose: print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings

path: app.backend.core.messagebuilder/MessageBuilder.insert_message
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<4>:<add> role (str): The role of the message sender (either "user", "system", or "assistant"). <del> role (str): The role of the message sender (either "user" or "system"). <8>:<add> message: ChatCompletionMessageParam <add> if role == "user": <add> message = ChatCompletionUserMessageParam(role="user", content=self.normalize_content(content)) <add> elif role == "system": <add> message = ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(content)) <add> elif role == "assistant": <add> message = ChatCompletionAssistantMessageParam(role="assistant", content=self.normalize_content(content)) <add> else: <add> raise ValueError(f"Invalid role: {role}") <add> self.messages.insert(index, message) <del> self.messages.insert(index, {"role": role, "content": self.normalize_content(content)})
# module: app.backend.core.messagebuilder class MessageBuilder: def insert_message(self, role: str, content: str, index: int = 1): <0> """ <1> Inserts a message into the conversation at the specified index, <2> or at index 1 (after system message) if no index is specified. <3> Args: <4> role (str): The role of the message sender (either "user" or "system"). <5> content (str): The content of the message. <6> index (int): The index at which to insert the message. <7> """ <8> self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) <9>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder normalize_content(content: str) ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - 
retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"] ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] + client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) - emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) + embeddings.extend([data.embedding for data in emb_response.data]) - embeddings.extend([data["embedding"] for data in emb_response["data"]]) if self.verbose: print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
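The two MessageBuilder records above swap plain dicts for the v1 SDK's typed message params, which lets type checkers validate roles and content. A minimal sketch of that style used directly; the message contents are illustrative only.

from openai.types.chat import (
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

messages: list[ChatCompletionMessageParam] = [
    ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
    ChatCompletionUserMessageParam(role="user", content="What is the deductible?"),
]
# Insert extra context after the system message, as MessageBuilder.insert_message does.
messages.insert(1, ChatCompletionUserMessageParam(role="user", content="Sources: info1.txt: ..."))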

path: app.backend.core.modelhelper/num_tokens_from_messages
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<15>:<add> for value in message.values(): <del> for key, value in message.items(): <16>:<add> num_tokens += len(encoding.encode(str(value))) <del> num_tokens += len(encoding.encode(value))
# module: app.backend.core.modelhelper def num_tokens_from_messages(message: dict[str, str], model: str) -> int: <0> """ <1> Calculate the number of tokens required to encode a message. <2> Args: <3> message (dict): The message to encode, represented as a dictionary. <4> model (str): The name of the model to use for encoding. <5> Returns: <6> int: The total number of tokens required to encode the message. <7> Example: <8> message = {'role': 'user', 'content': 'Hello, how are you?'} <9> model = 'gpt-3.5-turbo' <10> num_tokens_from_messages(message, model) <11> output: 11 <12> """ <13> encoding = tiktoken.encoding_for_model(get_oai_chatmodel_tiktok(model)) <14> num_tokens = 2 # For "role" and "content" keys <15> for key, value in message.items(): <16> num_tokens += len(encoding.encode(value)) <17> return num_tokens <18>
===========unchanged ref 0=========== at: app.backend.core.modelhelper get_oai_chatmodel_tiktok(aoaimodel: str) -> str at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] at: tiktoken.model encoding_for_model(model_name: str) -> Encoding ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 6=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class 
OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"] ===========changed ref 10=========== # module: app.backend.core.messagebuilder class MessageBuilder: def insert_message(self, role: str, content: str, index: int = 1): """ Inserts a message into the conversation at the specified index, or at index 1 (after system message) if no index is specified. Args: + role (str): The role of the message sender (either "user", "system", or "assistant"). - role (str): The role of the message sender (either "user" or "system"). content (str): The content of the message. index (int): The index at which to insert the message. """ + message: ChatCompletionMessageParam + if role == "user": + message = ChatCompletionUserMessageParam(role="user", content=self.normalize_content(content)) + elif role == "system": + message = ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(content)) + elif role == "assistant": + message = ChatCompletionAssistantMessageParam(role="assistant", content=self.normalize_content(content)) + else: + raise ValueError(f"Invalid role: {role}") + self.messages.insert(index, message) - self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_batch(self, texts: List[str]) -> List[List[float]]: batches = self.split_text_into_batches(texts) embeddings = [] + client = await self.create_client() for batch in batches: async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=batch.texts) - emb_response = await openai.Embedding.acreate(**emb_args, input=batch.texts) + embeddings.extend([data.embedding for data in emb_response.data]) - embeddings.extend([data["embedding"] for data in emb_response["data"]]) if self.verbose: print(f"Batch Completed. Batch size {len(batch.texts)} Token count {batch.token_length}") return embeddings
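The num_tokens_from_messages fix above iterates over the message's values and stringifies each before encoding. A standalone restatement with tiktoken; the +2 overhead for the "role" and "content" keys follows the record's own convention.

import tiktoken

def num_tokens(message: dict[str, str], model: str = "gpt-3.5-turbo") -> int:
    # +2 covers the "role" and "content" keys, per the record's convention.
    encoding = tiktoken.encoding_for_model(model)
    return 2 + sum(len(encoding.encode(str(value))) for value in message.values())

# The record's docstring cites 11 tokens for this example message.
print(num_tokens({"role": "user", "content": "Hello, how are you?"}))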

path: tests.test_prepdocs/test_compute_embedding_success
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<0>:<add> async def mock_create_client(*args, **kwargs): <del> async def mock_create(*args, **kwargs): <2>:<add> return MockClient( <add> embeddings_client=MockEmbeddingsClient( <add> create_embedding_response=openai.types.CreateEmbeddingResponse( <del> return { <3>:<add> object="list", <del> "object": "list", <4>:<del> "data": [ <5>:<del> { <6>:<del> "object": "embedding", <7>:<add> data=[ <add> openai.types.Embedding( <add> embedding=[ <del> "embedding": [ <8>:<add> 0.0023064255, <del> 0.0023064255, <9>:<add> -0.009327292, <del> -0.009327292, <10>:<add> -0.0028842222, <del> -0.0028842222, <11>:<add> ], <add> index=0, <add> object="embedding", <add> ) <12>:<add> model="text-embedding-ada-002", <add> usage=Usage(prompt_tokens=8, total_tokens=8), <del> "index": 0, <13>:<add> ) <del> } <14>:<add> ) <del> ], <15>:<del> "model": "text-embedding-ada-002", <16>:<del> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <17>:<add> ) <del> } <19>:<del> monkeypatch.setattr(openai.Embedding, "acreate", mock_create) <27>:<add> monkeypatch.setattr(embeddings, "create_client", mock_create_client)
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): <0> async def mock_create(*args, **kwargs): <1> # From https://platform.openai.com/docs/api-reference/embeddings/create <2> return { <3> "object": "list", <4> "data": [ <5> { <6> "object": "embedding", <7> "embedding": [ <8> 0.0023064255, <9> -0.009327292, <10> -0.0028842222, <11> ], <12> "index": 0, <13> } <14> ], <15> "model": "text-embedding-ada-002", <16> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <17> } <18> <19> monkeypatch.setattr(openai.Embedding, "acreate", mock_create) <20> embeddings = AzureOpenAIEmbeddingService( <21> open_ai_service="x", <22> open_ai_deployment="x", <23> open_ai_model_name="text-ada-003", <24> credential=MockAzureCredential(), <25> disable_batch=False, <26> ) <27> assert await embeddings.create_embeddings(texts=["foo"]) == [ <28> [ <29> 0.0023064255, <30> -0.009327292, <31> -0.0028842222, <32> ] <33> ] <34> <35> embeddings = AzureOpenAIEmbeddingService( <36> open_ai_service="x", <37> open_ai_deployment="x", <38> open_ai_model_name="text-ada-003", <39> credential=MockAzureCredential(), <40> disable_batch=True, <41> ) <42> assert await embeddings.create_embeddings(texts=["foo"]) == [ <43> [ <44> 0.0023064255, <45> -0.009327292, <46> -0.002884</s>
===========below chunk 0=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 6=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): 
self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"] ===========changed ref 10=========== # module: app.backend.core.modelhelper def num_tokens_from_messages(message: dict[str, str], model: str) -> int: """ Calculate the number of tokens required to encode a message. Args: message (dict): The message to encode, represented as a dictionary. model (str): The name of the model to use for encoding. Returns: int: The total number of tokens required to encode the message. Example: message = {'role': 'user', 'content': 'Hello, how are you?'} model = 'gpt-3.5-turbo' num_tokens_from_messages(message, model) output: 11 """ encoding = tiktoken.encoding_for_model(get_oai_chatmodel_tiktok(model)) num_tokens = 2 # For "role" and "content" keys + for value in message.values(): - for key, value in message.items(): + num_tokens += len(encoding.encode(str(value))) - num_tokens += len(encoding.encode(value)) return num_tokens

path: tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<0>:<del> async def mock_acreate(*args, **kwargs): <1>:<del> raise openai.error.RateLimitError <2>:<del> <3>:<del> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <14>:<add> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): <0> async def mock_acreate(*args, **kwargs): <1> raise openai.error.RateLimitError <2> <3> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <4> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <5> with pytest.raises(tenacity.RetryError): <6> embeddings = AzureOpenAIEmbeddingService( <7> open_ai_service="x", <8> open_ai_deployment="x", <9> open_ai_model_name="text-embedding-ada-002", <10> credential=MockAzureCredential(), <11> disable_batch=False, <12> verbose=True, <13> ) <14> await embeddings.create_embeddings(texts=["foo"]) <15> captured = capsys.readouterr() <16> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 <17>
===========changed ref 0=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 1=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 2=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): + async def mock_create_client(*args, **kwargs): - async def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create + return MockClient( + embeddings_client=MockEmbeddingsClient( + create_embedding_response=openai.types.CreateEmbeddingResponse( - return { + object="list", - "object": "list", - "data": [ - { - "object": "embedding", + data=[ + openai.types.Embedding( + embedding=[ - "embedding": [ + 0.0023064255, - 0.0023064255, + -0.009327292, - -0.009327292, + -0.0028842222, - -0.0028842222, + ], + index=0, + object="embedding", + ) ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), - "index": 0, + ) - } + ) - ], - "model": "text-embedding-ada-002", - "usage": {"prompt_tokens": 8, "total_tokens": 8}, + ) - } - monkeypatch.setattr(openai.Embedding, "acreate", mock_create) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert</s> ===========changed ref 4=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 <s> ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) + monkeypatch.setattr(embeddings, "create_client",</s> ===========changed ref 5=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 2 <s>create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def 
create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 12=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } -

path: tests.test_prepdocs/test_compute_embedding_ratelimiterror_single
type: Modified
project: Azure-Samples~azure-search-openai-demo
commit_hash: aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
commit_message: Upgrade OpenAI SDK to v1 (#1017)
ground_truth:
<0>:<del> async def mock_acreate(*args, **kwargs): <1>:<del> raise openai.error.RateLimitError <2>:<del> <3>:<del> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <14>:<add> monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys): <0> async def mock_acreate(*args, **kwargs): <1> raise openai.error.RateLimitError <2> <3> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <4> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <5> with pytest.raises(tenacity.RetryError): <6> embeddings = AzureOpenAIEmbeddingService( <7> open_ai_service="x", <8> open_ai_deployment="x", <9> open_ai_model_name="text-embedding-ada-002", <10> credential=MockAzureCredential(), <11> disable_batch=True, <12> verbose=True, <13> ) <14> await embeddings.create_embeddings(texts=["foo"]) <15> captured = capsys.readouterr() <16> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 <17>
===========changed ref 0=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 1=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 2=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): - async def mock_acreate(*args, **kwargs): - raise openai.error.RateLimitError - - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=False, verbose=True, ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 4=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): + async def mock_create_client(*args, **kwargs): - async def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create + return MockClient( + embeddings_client=MockEmbeddingsClient( + create_embedding_response=openai.types.CreateEmbeddingResponse( - return { + object="list", - "object": "list", - "data": [ - { - "object": "embedding", + data=[ + openai.types.Embedding( + embedding=[ - "embedding": [ + 0.0023064255, - 0.0023064255, + -0.009327292, - -0.009327292, + -0.0028842222, - -0.0028842222, + ], + index=0, + object="embedding", + ) ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), - "index": 0, + ) - } + ) - ], - "model": "text-embedding-ada-002", - "usage": {"prompt_tokens": 8, "total_tokens": 8}, + ) - } - monkeypatch.setattr(openai.Embedding, "acreate", mock_create) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert</s> ===========changed ref 5=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 <s> ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", 
credential=MockAzureCredential(), organization="org", disable_batch=False ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) + monkeypatch.setattr(embeddings, "create_client",</s> ===========changed ref 6=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 2 <s>create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) +
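Taken together, the diffs in this record replace the module-level openai.Embedding.acreate monkeypatch with an injectable create_client factory, so the tenacity retry loop can be driven without touching the network. A minimal, runnable sketch of that pattern, with illustrative names and constants rather than the repo's:

import asyncio
import httpx
import openai
import tenacity

def fake_response(status: int) -> httpx.Response:
    # openai v1 exceptions require an httpx.Response with an attached Request
    return httpx.Response(status, request=httpx.Request("get", "https://example.invalid/"))

async def always_rate_limited(text: str) -> list[float]:
    raise openai.RateLimitError(
        message="Rate limited on the OpenAI embeddings API",
        response=fake_response(429),
        body=None,
    )

async def embed_with_retry(text: str) -> list[float]:
    # Mirrors the AsyncRetrying loop in create_embedding_single: up to 15
    # attempts, retrying only on RateLimitError; wait_none() stands in for the
    # test's monkeypatched wait_random_exponential.
    async for attempt in tenacity.AsyncRetrying(
        retry=tenacity.retry_if_exception_type(openai.RateLimitError),
        wait=tenacity.wait_none(),
        stop=tenacity.stop_after_attempt(15),
    ):
        with attempt:
            return await always_rate_limited(text)

try:
    asyncio.run(embed_with_retry("foo"))
except tenacity.RetryError:
    print("gave up after 15 attempts")  # 14 sleeps, hence the 14 messages asserted above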
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> async def mock_acreate(*args, **kwargs): <1>:<del> raise openai.error.AuthenticationError <2>:<del> <3>:<del> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <5>:<add> with pytest.raises(openai.AuthenticationError): <del> with pytest.raises(openai.error.AuthenticationError): <14>:<add> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client) <16>:<add> with pytest.raises(openai.AuthenticationError): <del> with pytest.raises(openai.error.AuthenticationError): <25>:<add> monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
# module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_autherror(monkeypatch, capsys): <0> async def mock_acreate(*args, **kwargs): <1> raise openai.error.AuthenticationError <2> <3> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <4> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <5> with pytest.raises(openai.error.AuthenticationError): <6> embeddings = AzureOpenAIEmbeddingService( <7> open_ai_service="x", <8> open_ai_deployment="x", <9> open_ai_model_name="text-embedding-ada-002", <10> credential=MockAzureCredential(), <11> disable_batch=False, <12> verbose=True, <13> ) <14> await embeddings.create_embeddings(texts=["foo"]) <15> <16> with pytest.raises(openai.error.AuthenticationError): <17> embeddings = AzureOpenAIEmbeddingService( <18> open_ai_service="x", <19> open_ai_deployment="x", <20> open_ai_model_name="text-embedding-ada-002", <21> credential=MockAzureCredential(), <22> disable_batch=True, <23> verbose=True, <24> ) <25> await embeddings.create_embeddings(texts=["foo"]) <26>
===========changed ref 0=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 1=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 2=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 3=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_single(monkeypatch, capsys): - async def mock_acreate(*args, **kwargs): - raise openai.error.RateLimitError - - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=True, verbose=True, ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 7=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_ratelimiterror_batch(monkeypatch, capsys): - async def mock_acreate(*args, **kwargs): - raise openai.error.RateLimitError - - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) with pytest.raises(tenacity.RetryError): embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-embedding-ada-002", credential=MockAzureCredential(), disable_batch=False, verbose=True, ) + monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client) await embeddings.create_embeddings(texts=["foo"]) captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 8=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): + async def mock_create_client(*args, **kwargs): - async def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create + return MockClient( + embeddings_client=MockEmbeddingsClient( + create_embedding_response=openai.types.CreateEmbeddingResponse( - return { + object="list", - "object": "list", - "data": [ - { - 
"object": "embedding", + data=[ + openai.types.Embedding( + embedding=[ - "embedding": [ + 0.0023064255, - 0.0023064255, + -0.009327292, - -0.009327292, + -0.0028842222, - -0.0028842222, + ], + index=0, + object="embedding", + ) ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), - "index": 0, + ) - } + ) - ], - "model": "text-embedding-ada-002", - "usage": {"prompt_tokens": 8, "total_tokens": 8}, + ) - } - monkeypatch.setattr(openai.Embedding, "acreate", mock_create) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert</s> ===========changed ref 9=========== # module: tests.test_prepdocs @pytest.mark.asyncio async def test_compute_embedding_success(monkeypatch): # offset: 1 <s> ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) + monkeypatch.setattr(embeddings, "create_client", mock_create_client) assert await embeddings.create_embeddings(texts=["foo"]) == [ [ 0.0023064255, -0.009327292, -0.0028842222, ] ] embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) + monkeypatch.setattr(embeddings, "create_client",</s>
tests.test_chatapproach/test_get_search_query
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del> <4>:<add> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"this is the query","role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <del> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}'
# module: tests.test_chatapproach + def test_get_search_query(chat_approach): - def test_get_search_query(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <5> default_query = "hello" <6> query = chat_approach.get_search_query(json.loads(payload), default_query) <7> <8> assert query == "accesstelemedicineservices" <9>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 6=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 9=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 10=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 11=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> 
dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 14=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): def create_embedding_single(self, text: str) -> List[float]: + client = await self.create_client() async for attempt in AsyncRetrying( + retry=retry_if_exception_type(RateLimitError), - retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=self.before_retry_sleep, ): with attempt: - emb_args = await self.create_embedding_arguments() + emb_response = await client.embeddings.create(model=self.open_ai_model_name, input=text) - emb_response = await openai.Embedding.acreate(**emb_args, input=text) + return emb_response.data[0].embedding - return emb_response["data"][0]["embedding"]
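This record's new test rehydrates its raw JSON payload into a typed ChatCompletion via pydantic's model_validate. A sketch of the extraction logic both search-query tests pin down; the helper below is hypothetical and the repo's real method may differ in details:

import json
from openai.types.chat import ChatCompletion

def get_search_query(completion: ChatCompletion, default_query: str) -> str:
    message = completion.choices[0].message
    if message.function_call and message.function_call.name == "search_sources":
        # the model returns its function arguments as a JSON string
        args = json.loads(message.function_call.arguments)
        return args.get("search_query", default_query)
    if message.content:
        return message.content
    return default_query

# completion = ChatCompletion.model_validate(json.loads(payload), strict=False)
# get_search_query(completion, "hello")  -> "accesstelemedicineservices" for the payload above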
tests.test_chatapproach/test_get_search_query_returns_default
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del> <4>:<add> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"","role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <del> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <6>:<add> chatcompletions = ChatCompletion.model_validate(json.loads(payload), strict=False) <add> query = chat_approach.get_search_query(chat
# module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <5> default_query = "hello" <6> query = chat_approach.get_search_query(json.loads(payload), default_query) <7> <8> assert query == default_query <9>
===========unchanged ref 0=========== at: tests.test_chatapproach chat_approach() ===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_search_query(chat_approach): - def test_get_search_query(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"this is the query","role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' - payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":</s> ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_search_query(chat_approach): - def test_get_search_query(): # offset: 1 <s>],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" + chatcompletions = ChatCompletion.model_validate(json.loads(payload), strict=False) + query = chat_approach.get_search_query(chatcompletions, default_query) - query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + 
===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 12=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 13=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 14=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 17=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model
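The MessageBuilder diff just above switches from plain dicts to the TypedDicts that openai v1 ships for each chat role. A small sketch of that pattern; the imports are the real v1 names, while the surrounding code is illustrative:

from openai.types.chat import (
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

# TypedDict constructors produce ordinary dicts, so the wire format is
# unchanged; only static type checking gets stricter.
messages: list[ChatCompletionMessageParam] = [
    ChatCompletionSystemMessageParam(role="system", content="You are a bot."),
    ChatCompletionUserMessageParam(role="user", content="What does a Product Manager do?"),
]
print(messages)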
tests.test_chatapproach/test_get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", <6> model_id="gpt-35-turbo", <7> history=[ <8> {"role": "user", "content": "What happens in a performance review?"}, <9> { <10> "role": "assistant", <11> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <12> }, <13> {"role": "user", "content": "What does a Product Manager do?"}, <14> ], <15> user_content="What does a Product Manager do?", <16> max_tokens=3000, <17> ) <18> assert messages == [ <19> {"role": "system", "content": "You are a bot."}, <20> {"role": "user", "content": "What happens in a performance review?"}, <21> { <22> "role": "assistant", <23> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The</s>
===========below chunk 0=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): # offset: 1 }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========unchanged ref 0=========== at: json loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any at: tests.test_chatapproach.test_get_search_query query = chat_approach.get_search_query(chatcompletions, default_query) ===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"","role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' - payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' </s> ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): # offset: 1 <s>results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" + chatcompletions = ChatCompletion.model_validate(json.loads(payload), strict=False) + query = chat_approach.get_search_query(chatcompletions, default_query) - query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_search_query(chat_approach): - def test_get_search_query(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - + payload = 
'{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"this is the query","role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' - payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":</s> ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_search_query(chat_approach): - def test_get_search_query(): # offset: 1 <s>],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" + chatcompletions = ChatCompletion.model_validate(json.loads(payload), strict=False) + query = chat_approach.get_search_query(chatcompletions, default_query) - query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response +
tests.test_chatapproach/test_get_messages_from_history_truncated
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", <6> model_id="gpt-35-turbo", <7> history=[ <8> {"role": "user", "content": "What happens in a performance review?"}, <9> { <10> "role": "assistant", <11> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <12> }, <13> {"role": "user", "content": "What does a Product Manager do?"}, <14> ], <15> user_content="What does a Product Manager do?", <16> max_tokens=10, <17> ) <18> assert messages == [ <19> {"role": "system", "content": "You are a bot."}, <20> {"role": "user", "content": "What does a Product Manager do?"}, <21> ] <22>
===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=3000, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a</s> ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): # offset: 1 <s> and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"","role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' - payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' </s> ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): # offset: 1 <s>results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" + chatcompletions = ChatCompletion.model_validate(json.loads(payload), strict=False) + query = chat_approach.get_search_query(chatcompletions, default_query) - query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query
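A compact sketch of the newest-first accumulation that this and the neighbouring truncation records exercise. The cost function is a stand-in for real token counting, and in the repo's helper the system prompt and the new user question are budgeted separately from the history walk:

def truncate_history(history: list[dict], budget: int) -> list[dict]:
    def cost(message: dict) -> int:
        return 3 + len(message["content"].split())  # stand-in for a tokenizer

    kept: list[dict] = []
    used = 0
    for message in reversed(history):  # newest turns are kept first
        price = cost(message)
        if used + price > budget:
            break  # everything older than this point is dropped
        kept.insert(0, message)
        used += price
    return kept

With a tight budget only the most recent turn survives, which matches the [system, final user] shape asserted in the max_tokens=10 case above.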
tests.test_chatapproach/test_get_messages_from_history_truncated_longer
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", # 8 tokens <6> model_id="gpt-35-turbo", <7> history=[ <8> {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens <9> { <10> "role": "assistant", <11> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <12> }, # 102 tokens <13> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <14> { <15> "role": "assistant", <16> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <17> }, # 26 tokens <18> {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens <19> ], <20> user_content="What does a Product Manager do?", <21> max_tokens=55, <22> ) <23> assert messages == [ <24> {"role": "system", "</s>
===========below chunk 0=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): # offset: 1 {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=3000, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a</s> ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): # offset: 1 <s> and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_search_query_returns_default(chat_approach): - def test_get_search_query_returns_default(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"content":"","role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' - payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' </s>
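A quick check of the arithmetic in the two budgeted variants, using the per-message token counts annotated in the test comments: with max_tokens=55 the walk keeps the final question (10) plus the dress-code pair (26 + 9) for 45 tokens, and the 102-token review answer would overflow, so everything older than the dress-code question is dropped. In the break-pair record that follows, 10 + 26 + 9 + 102 lands on 147 exactly, while the matching 10-token user question would push the total to 157; the answer therefore survives without its question, and the 8-token system prompt is evidently budgeted separately in both cases.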
tests.test_chatapproach/test_get_messages_from_history_truncated_break_pair
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<1>:<del> chat_approach = ChatReadRetrieveReadApproach( <2>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <3>:<del> ) <4>:<del>
# module: tests.test_chatapproach + def test_get_messages_from_history_truncated_break_pair(chat_approach): - def test_get_messages_from_history_truncated_break_pair(): <0> """Tests that the truncation breaks the pair of messages.""" <1> chat_approach = ChatReadRetrieveReadApproach( <2> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <3> ) <4> <5> messages = chat_approach.get_messages_from_history( <6> system_prompt="You are a bot.", # 8 tokens <7> model_id="gpt-35-turbo", <8> history=[ <9> {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens <10> { <11> "role": "assistant", <12> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <13> }, # 102 tokens <14> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <15> { <16> "role": "assistant", <17> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <18> }, # 26 tokens <19> {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens <20> ], <21> user_content="What does a Product Manager do?", <22> max_tokens=147, <23> ) </s>
===========below chunk 0=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_break_pair(chat_approach): - def test_get_messages_from_history_truncated_break_pair(): # offset: 1 {"role": "system", "content": "You are a bot."}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s> ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): # offset: 1 <s> {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
tests.test_chatapproach/test_extract_followup_questions
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_extract_followup_questions(chat_approach): - def test_extract_followup_questions(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> content = "Here is answer to your question.<<What is the dress code?>>" <5> pre_content, followup_questions = chat_approach.extract_followup_questions(content) <6> assert pre_content == "Here is answer to your question." <7> assert followup_questions == ["What is the dress code?"] <8>
===========unchanged ref 0=========== at: tests.test_chatapproach.test_extract_followup_questions pre_content, followup_questions = chat_approach.extract_followup_questions(content) pre_content, followup_questions = chat_approach.extract_followup_questions(content) ===========changed ref 0=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! 
[employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s> ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): # offset: 1 <s> {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=3000, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a</s> ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history(chat_approach): - def test_get_messages_from_history(): # offset: 1 <s> and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
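A recurring refactor in these records replaces per-test construction of ChatReadRetrieveReadApproach, with its easy-to-misalign positional arguments, by one shared pytest fixture that names every argument. The same pattern in isolation (illustrative Greeter class, not from the repo):

import pytest

class Greeter:
    def __init__(self, *, greeting: str):
        self.greeting = greeting

    def greet(self, name: str) -> str:
        return f"{self.greeting}, {name}!"

@pytest.fixture
def greeter() -> Greeter:
    # One keyword-argument construction, shared by every test that
    # declares a `greeter` parameter.
    return Greeter(greeting="Hello")

def test_greet(greeter):
    assert greeter.greet("World") == "Hello, World!"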
tests.test_chatapproach/test_extract_followup_questions_three
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_extract_followup_questions_three(chat_approach): - def test_extract_followup_questions_three(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> content = """Here is answer to your question. <5> <6> <<What are some examples of successful product launches they should have experience with?>> <7> <<Are there any specific technical skills or certifications required for the role?>> <8> <<Is there a preference for candidates with experience in a specific industry or sector?>>""" <9> pre_content, followup_questions = chat_approach.extract_followup_questions(content) <10> assert pre_content == "Here is answer to your question.\n\n" <11> assert followup_questions == [ <12> "What are some examples of successful product launches they should have experience with?", <13> "Are there any specific technical skills or certifications required for the role?", <14> "Is there a preference for candidates with experience in a specific industry or sector?", <15> ] <16>
===========unchanged ref 0=========== at: tests.test_chatapproach.test_extract_followup_questions_three pre_content, followup_questions = chat_approach.extract_followup_questions(content) pre_content, followup_questions = chat_approach.extract_followup_questions(content) ===========changed ref 0=========== # module: tests.test_chatapproach + def test_extract_followup_questions(chat_approach): - def test_extract_followup_questions(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question.<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == ["What is the dress code?"] ===========changed ref 1=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 2=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s> ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): # offset: 1 <s> {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
tests.test_chatapproach/test_extract_followup_questions_no_followup
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> content = "Here is answer to your question." <5> pre_content, followup_questions = chat_approach.extract_followup_questions(content) <6> assert pre_content == "Here is answer to your question." <7> assert followup_questions == [] <8>
===========unchanged ref 0=========== at: tests.test_chatapproach.test_extract_followup_questions_no_pre_content pre_content, followup_questions = chat_approach.extract_followup_questions(content) ===========changed ref 0=========== # module: tests.test_chatapproach + def test_extract_followup_questions(chat_approach): - def test_extract_followup_questions(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question.<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == ["What is the dress code?"] ===========changed ref 1=========== # module: tests.test_chatapproach + def test_extract_followup_questions_three(chat_approach): - def test_extract_followup_questions_three(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = """Here is answer to your question. <<What are some examples of successful product launches they should have experience with?>> <<Are there any specific technical skills or certifications required for the role?>> <<Is there a preference for candidates with experience in a specific industry or sector?>>""" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question.\n\n" assert followup_questions == [ "What are some examples of successful product launches they should have experience with?", "Are there any specific technical skills or certifications required for the role?", "Is there a preference for candidates with experience in a specific industry or sector?", ] ===========changed ref 2=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s> ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): # offset: 1 <s> {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
tests.test_chatapproach/test_extract_followup_questions_no_pre_content
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(chat_approach): - def test_extract_followup_questions_no_pre_content(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> content = "<<What is the dress code?>>" <5> pre_content, followup_questions = chat_approach.extract_followup_questions(content) <6> assert pre_content == "" <7> assert followup_questions == ["What is the dress code?"] <8>
===========unchanged ref 0=========== at: tests.test_chatapproach.test_get_messages_from_history_few_shots messages = chat_approach.get_messages_from_history( system_prompt=chat_approach.query_prompt_template, model_id=chat_approach.chatgpt_model, user_content=user_query_request, history=[], max_tokens=chat_approach.chatgpt_token_limit - len(user_query_request), few_shots=chat_approach.query_prompt_few_shots, ) ===========changed ref 0=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == [] ===========changed ref 1=========== # module: tests.test_chatapproach + def test_extract_followup_questions(chat_approach): - def test_extract_followup_questions(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question.<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == ["What is the dress code?"] ===========changed ref 2=========== # module: tests.test_chatapproach + def test_extract_followup_questions_three(chat_approach): - def test_extract_followup_questions_three(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = """Here is answer to your question. 
<<What are some examples of successful product launches they should have experience with?>> <<Are there any specific technical skills or certifications required for the role?>> <<Is there a preference for candidates with experience in a specific industry or sector?>>""" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question.\n\n" assert followup_questions == [ "What are some examples of successful product launches they should have experience with?", "Are there any specific technical skills or certifications required for the role?", "Is there a preference for candidates with experience in a specific industry or sector?", ] ===========changed ref 3=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s>
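The four extraction records above fully determine the contract of extract_followup_questions: everything before the first "<<" is the answer text, and each <<...>> span becomes a follow-up question. One implementation consistent with all four tests (a sketch, not necessarily the repo's exact code):

import re

def extract_followup_questions(content: str) -> tuple[str, list[str]]:
    # Pre-content is whatever precedes the first "<<" (the whole string if
    # there are no markers); questions are the texts inside each <<...>>.
    return content.split("<<")[0], re.findall(r"<<([^>]+)>>", content)

Splitting on the first "<<" rather than stripping markers in place is what preserves the trailing "\n\n" the three-question test asserts.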
tests.test_chatapproach/test_get_messages_from_history_few_shots
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> chat_approach = ChatReadRetrieveReadApproach( <1>:<del> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2>:<del> ) <3>:<del>
# module: tests.test_chatapproach + def test_get_messages_from_history_few_shots(chat_approach): - def test_get_messages_from_history_few_shots(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> user_query_request = "What does a Product manager do?" <5> messages = chat_approach.get_messages_from_history( <6> system_prompt=chat_approach.query_prompt_template, <7> model_id=chat_approach.chatgpt_model, <8> user_content=user_query_request, <9> history=[], <10> max_tokens=chat_approach.chatgpt_token_limit - len(user_query_request), <11> few_shots=chat_approach.query_prompt_few_shots, <12> ) <13> # Make sure messages are in the right order <14> assert messages[0]["role"] == "system" <15> assert messages[1]["role"] == "user" <16> assert messages[2]["role"] == "assistant" <17> assert messages[3]["role"] == "user" <18> assert messages[4]["role"] == "assistant" <19> assert messages[5]["role"] == "user" <20> assert messages[5]["content"] == user_query_request <21>
===========changed ref 0=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(chat_approach): - def test_extract_followup_questions_no_pre_content(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "" assert followup_questions == ["What is the dress code?"] ===========changed ref 1=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == [] ===========changed ref 2=========== # module: tests.test_chatapproach + def test_extract_followup_questions(chat_approach): - def test_extract_followup_questions(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question.<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == ["What is the dress code?"] ===========changed ref 3=========== # module: tests.test_chatapproach + def test_extract_followup_questions_three(chat_approach): - def test_extract_followup_questions_three(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = """Here is answer to your question. 
<<What are some examples of successful product launches they should have experience with?>> <<Are there any specific technical skills or certifications required for the role?>> <<Is there a preference for candidates with experience in a specific industry or sector?>>""" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question.\n\n" assert followup_questions == [ "What are some examples of successful product launches they should have experience with?", "Are there any specific technical skills or certifications required for the role?", "Is there a preference for candidates with experience in a specific industry or sector?", ] ===========changed ref 4=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(chat_approach): - def test_get_messages_from_history_truncated(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 6=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(chat_approach): - def test_get_messages_from_history_truncated_longer(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", # 8 tokens model_id="gpt-35-turbo", history=[ {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, # 102 tokens {"role": "user", "content": "Is there a dress code?"}, # 9 tokens { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, # 26 tokens {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens ], user_content="What does a Product Manager do?", max_tokens=55, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "</s>
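The few-shots record only asserts ordering: system prompt first, then the few-shot user/assistant pairs, then the live conversation ending with the new user message. Reduced to its essentials (hypothetical assemble helper; the real code routes this through MessageBuilder and the truncation logic sketched earlier):

def assemble(system_prompt, few_shots, history, user_content):
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(few_shots)   # worked query-rewriting examples
    messages.extend(history)     # prior real turns, if any
    messages.append({"role": "user", "content": user_content})
    return messages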
tests.conftest/mock_openai_embedding
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<1>:<add> return CreateEmbeddingResponse( <add> object="list", <add> data=[ <add> Embedding( <add> embedding=[ <add> 0.0023064255, <add> -0.009327292, <add> -0.0028842222, <add> ], <add> index=0, <add> object="embedding", <add> ) <add> ], <add> model="text-embedding-ada-002", <add> usage=Usage(prompt_tokens=8, total_tokens=8), <add> ) <del> if openai.api_type == "openai": <2>:<del> assert kwargs.get("deployment_id") is None <3>:<del> else: <4>:<del> assert kwargs.get("deployment_id") is not None <5>:<del> return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} <7>:<add> def patch(openai_client): <add> monkeypatch.setattr(openai_client.embeddings, "create", mock_acreate) <del> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate)
# module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): <0> async def mock_acreate(*args, **kwargs): <1> if openai.api_type == "openai": <2> assert kwargs.get("deployment_id") is None <3> else: <4> assert kwargs.get("deployment_id") is not None <5> return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} <6> <7> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <8>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 6=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 9=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 10=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 11=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 14=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, 
system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 15=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 17=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == [] ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type") ===========changed ref 19=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(chat_approach): - def test_extract_followup_questions_no_pre_content(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "<<What is the dress code?>>" pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "" assert followup_questions == ["What is the dress code?"]
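The mock_openai_embedding change shows the two mechanical shifts of the v1 SDK: responses are typed models rather than plain dicts, and patching targets a client instance (client.embeddings.create) instead of the module-level openai.Embedding.acreate. A standalone sketch of the response half (import paths as exposed by the v1 openai package):

from openai.types import CreateEmbeddingResponse, Embedding
from openai.types.create_embedding_response import Usage

def fake_embedding_response() -> CreateEmbeddingResponse:
    # Attribute access (resp.data[0].embedding) then behaves exactly as it
    # would on a response from the real client.
    return CreateEmbeddingResponse(
        object="list",
        data=[Embedding(embedding=[0.1, 0.2, 0.3], index=0, object="embedding")],
        model="text-embedding-ada-002",
        usage=Usage(prompt_tokens=8, total_tokens=8),
    )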
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<2>:<add> chunk_id = "test-id" <add> model = "gpt-35-turbo" <3>:<add> {"object": "chat.completion.chunk", "choices": [], "id": chunk_id, "model": model, "created": 1}, <add> { <add> "object": "chat.completion.chunk", <del> {"object": "chat.completion.chunk", "choices": []}, <4>:<add> "choices": [{"delta": {"role": "assistant"}, "index": 0, "finish_reason": None}], <add> "id": chunk_id, <add> "model": model, <add> "created": 1, <add> }, <del> {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, <12>:<add> "choices": [ <add> { <add> "delta": {"role": "assistant", "content": parts[0] + "<<"}, <del> "choices": [{"delta": {"role": "assistant", "content": parts[0] + "<<"}}], <13>:<add> "index": 0, <add> "finish_reason": None, <add> } <add> ], <add> "id": chunk_id, <add> "model": model, <add> "created": 1, <18>:<add> "choices": [
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer: str): <2> self.responses = [ <3> {"object": "chat.completion.chunk", "choices": []}, <4> {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, <5> ] <6> # Split at << to simulate chunked responses <7> if answer.find("<<") > -1: <8> parts = answer.split("<<") <9> self.responses.append( <10> { <11> "object": "chat.completion.chunk", <12> "choices": [{"delta": {"role": "assistant", "content": parts[0] + "<<"}}], <13> } <14> ) <15> self.responses.append( <16> { <17> "object": "chat.completion.chunk", <18> "choices": [{"delta": {"role": "assistant", "content": parts[1]}}], <19> } <20> ) <21> else: <22> self.responses.append( <23> { <24> "object": "chat.completion.chunk", <25> "choices": [{"delta": {"content": answer}}], <26> } <27> ) <28> <29> def __aiter__(self): <30> return self <31> <32> async def __anext__(self): <33> if self.responses: <34> return self.responses.pop(0) <35> else: <36> raise StopAsyncIteration <37> <38> async def mock_acreate(*args, **kwargs): <39> if openai.api_type == "openai": <40> assert kwargs.get("deployment_id") is None <41> else: <42> assert kwargs.get("deployment_id") is not None <43> messages = kwargs["messages"] <44> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <45> answer = "capital of France" <46> else: </s>
===========below chunk 0=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): # offset: 1 if messages[0]["content"].find("Generate 3 very brief follow-up questions") > -1: answer = "The capital of France is Paris. [Benefit_Options-2.pdf]. <<What is the capital of Spain?>>" if "stream" in kwargs and kwargs["stream"] is True: return AsyncChatCompletionIterator(answer) else: return openai.util.convert_to_openai_object( {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} ) monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) ===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + return CreateEmbeddingResponse( + object="list", + data=[ + Embedding( + embedding=[ + 0.0023064255, + -0.009327292, + -0.0028842222, + ], + index=0, + object="embedding", + ) + ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), + ) - if openai.api_type == "openai": - assert kwargs.get("deployment_id") is None - else: - assert kwargs.get("deployment_id") is not None - return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} + def patch(openai_client): + monkeypatch.setattr(openai_client.embeddings, "create", mock_acreate) - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 7=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 10=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 11=========== # module: 
tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 12=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 15=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 16=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } -
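The chat-completion mock's structural change is that v1 streaming yields chunk objects carrying id, model, and created alongside each per-choice delta, and that the opening chunks may have an empty choices list. The async-iterator pattern the fixture relies on, cut down (dict chunks stand in here for the typed models):

class FakeChatStream:
    def __init__(self, chunks):
        self.chunks = list(chunks)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Pop chunks in order; exhaustion ends the `async for` loop.
        if not self.chunks:
            raise StopAsyncIteration
        return self.chunks.pop(0)

An `async for chunk in FakeChatStream([...])` then behaves like iterating the stream the real client returns when stream=True.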
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<4>:<add> mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) <add> mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) <del>
# module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): <0> quart_app = app.create_app() <1> <2> async with quart_app.test_app() as test_app: <3> quart_app.config.update({"TESTING": True}) <4> <5> yield test_app.test_client() <6>
===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + return CreateEmbeddingResponse( + object="list", + data=[ + Embedding( + embedding=[ + 0.0023064255, + -0.009327292, + -0.0028842222, + ], + index=0, + object="embedding", + ) + ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), + ) - if openai.api_type == "openai": - assert kwargs.get("deployment_id") is None - else: - assert kwargs.get("deployment_id") is not None - return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} + def patch(openai_client): + monkeypatch.setattr(openai_client.embeddings, "create", mock_acreate) - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 7=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 10=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 11=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 12=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 13=========== # module: 
scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 15=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 16=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 18=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == [] ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type")
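Because the v1 SDK keeps configuration on client instances, the fixtures can no longer patch openai module globals up front. Instead mock_openai_chatcompletion and mock_openai_embedding now return a patch(openai_client) callable, and the client fixture applies it to the instance the app stored under app.CONFIG_OPENAI_CLIENT once create_app() has run. The shape of that pattern, reduced (the fake_create body is illustrative):

import pytest

@pytest.fixture
def mock_chat(monkeypatch):
    async def fake_create(*args, **kwargs):
        return {"choices": [{"message": {"role": "assistant", "content": "ok"}}]}

    def patch(openai_client):
        # Patch the instance the app actually calls; with the v1 SDK there
        # is no module-level openai.ChatCompletion left to monkeypatch.
        monkeypatch.setattr(openai_client.chat.completions, "create", fake_create)

    return patch

Inside a test-app fixture this is applied as mock_chat(test_app.app.config[app.CONFIG_OPENAI_CLIENT]), mirroring the diffs above.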
tests.conftest/auth_client
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<14>:<add> mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) <add> mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
# module: tests.conftest @pytest_asyncio.fixture(params=auth_envs) async def auth_client( monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_confidential_client_success, mock_list_groups_success, mock_acs_search_filter, request, ): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <5> for key, value in request.param.items(): <6> monkeypatch.setenv(key, value) <7> <8> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <9> mock_default_azure_credential.return_value = MockAzureCredential() <10> quart_app = app.create_app() <11> <12> async with quart_app.test_app() as test_app: <13> quart_app.config.update({"TESTING": True}) <14> client = test_app.test_client() <15> client.config = quart_app.config <16> <17> yield client <18>
===========changed ref 0=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client() ===========changed ref 1=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + return CreateEmbeddingResponse( + object="list", + data=[ + Embedding( + embedding=[ + 0.0023064255, + -0.009327292, + -0.0028842222, + ], + index=0, + object="embedding", + ) + ], + model="text-embedding-ada-002", + usage=Usage(prompt_tokens=8, total_tokens=8), + ) - if openai.api_type == "openai": - assert kwargs.get("deployment_id") is None - else: - assert kwargs.get("deployment_id") is not None - return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} + def patch(openai_client): + monkeypatch.setattr(openai_client.embeddings, "create", mock_acreate) - monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 11=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 12=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> 
openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 13=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 16=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 17=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 19=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == []
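Editor's note: the fixtures in this record no longer patch the module-level openai API; they patch the embeddings resource on the client instance the app stores under app.CONFIG_OPENAI_CLIENT. A minimal pytest sketch of the same idea, assuming only the openai v1 SDK; the fixture name and the dummy api_key are illustrative.

import pytest
from openai import AsyncOpenAI
from openai.types import CreateEmbeddingResponse, Embedding
from openai.types.create_embedding_response import Usage


@pytest.fixture
def patched_openai_client(monkeypatch) -> AsyncOpenAI:
    client = AsyncOpenAI(api_key="sk-test")  # never contacted; create() is replaced below

    async def fake_create(*args, **kwargs) -> CreateEmbeddingResponse:
        return CreateEmbeddingResponse(
            object="list",
            data=[Embedding(embedding=[0.1, 0.2, 0.3], index=0, object="embedding")],
            model="text-embedding-ada-002",
            usage=Usage(prompt_tokens=8, total_tokens=8),
        )

    # Patch the instance attribute, not the openai module, so only this client is affected
    monkeypatch.setattr(client.embeddings, "create", fake_create)
    return client

Patching the instance keeps the fake scoped to one client, so other clients created in the same test session are unaffected.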
tests.test_app/test_ask_handle_exception_contentsafety
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<2>:<del> mock.Mock( <3>:<del> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4>:<del> ), <5>:<add> mock.Mock(side_effect=filtered_response),
# module: tests.test_app @pytest.mark.asyncio async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): <0> monkeypatch.setattr( <1> "approaches.retrievethenread.RetrieveThenReadApproach.run", <2> mock.Mock( <3> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4> ), <5> ) <6> <7> response = await client.post( <8> "/ask", <9> json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, <10> ) <11> assert response.status_code == 400 <12> result = await response.get_json() <13> assert "Exception in /ask: The response was filtered" in caplog.text <14> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <15>
===========changed ref 0=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 7=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 10=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 11=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 12=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await 
self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 15=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 16=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 17=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client() ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 19=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == [] ===========changed ref 20=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): def wrap_credential(self) -> str: if isinstance(self.credential, AzureKeyCredential): return self.credential.key if isinstance(self.credential, AsyncTokenCredential): if not self.cached_token or self.cached_token.expires_on <= time.time(): self.cached_token = await self.credential.get_token("https://cognitiveservices.azure.com/.default") return self.cached_token.token + raise TypeError("Invalid credential type") - raise Exception("Invalid credential type")
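Editor's note: this test (and the /chat variant below) relies on a module-level filtered_response defined outside the excerpt. In the v1 SDK, status errors wrap an httpx.Response plus a decoded body instead of taking positional arguments, so a plausible construction could look like the following; the body dict shape here is an assumption, not the repo's exact value.

import openai
from httpx import Request, Response

# Hypothetical reconstruction of the module-level `filtered_response` used above;
# in the v1 SDK, BadRequestError wraps an httpx.Response plus the decoded body.
filtered_response = openai.BadRequestError(
    message="The response was filtered",
    response=Response(400, request=Request(method="get", url="https://foo.bar/")),
    body={
        "message": "The response was filtered",
        "type": None,
        "param": "prompt",
        "code": "content_filter",  # what the app inspects to classify the failure
    },
)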
tests.test_app/test_chat_handle_exception_contentsafety
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<2>:<del> mock.Mock( <3>:<del> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4>:<del> ), <5>:<add> mock.Mock(side_effect=filtered_response),
# module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): <0> monkeypatch.setattr( <1> "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", <2> mock.Mock( <3> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4> ), <5> ) <6> <7> response = await client.post( <8> "/chat", <9> json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, <10> ) <11> assert response.status_code == 400 <12> result = await response.get_json() <13> assert "Exception in /chat: The response was filtered" in caplog.text <14> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <15>
===========changed ref 0=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): monkeypatch.setattr( "approaches.retrievethenread.RetrieveThenReadApproach.run", - mock.Mock( - side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") - ), + mock.Mock(side_effect=filtered_response), ) response = await client.post( "/ask", json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, ) assert response.status_code == 400 result = await response.get_json() assert "Exception in /ask: The response was filtered" in caplog.text snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 11=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 12=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 13=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate 
limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 16=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 17=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 18=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client() ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 20=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == []
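Editor's note: RateLimitMockEmbeddingsClient in the changed refs exists so the prepdocs tests can exercise retry behavior, while the AuthenticationError variant checks that non-retryable failures surface immediately. A sketch of embeddings retries with tenacity that retries only openai.RateLimitError; embed_with_retries and the wait/stop settings are illustrative, not the repo's exact configuration.

import openai
from tenacity import (
    AsyncRetrying,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)


async def embed_with_retries(client: openai.AsyncOpenAI, texts: list[str]) -> list[list[float]]:
    async for attempt in AsyncRetrying(
        retry=retry_if_exception_type(openai.RateLimitError),  # auth errors propagate immediately
        wait=wait_random_exponential(min=15, max=60),
        stop=stop_after_attempt(15),
    ):
        with attempt:
            response = await client.embeddings.create(model="text-embedding-ada-002", input=texts)
            return [item.embedding for item in response.data]

If all attempts are exhausted, tenacity raises a RetryError wrapping the last RateLimitError.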
tests.test_app/test_chat_handle_exception_streaming
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<add> chat_client = client.app.config[app.CONFIG_OPENAI_CLIENT] <1>:<add> chat_client.chat.completions, "create", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) <del> "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened"))
# module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_streaming(client, monkeypatch, snapshot, caplog): <0> monkeypatch.setattr( <1> "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) <2> ) <3> <4> response = await client.post( <5> "/chat", <6> json={"messages": [{"content": "What is the capital of France?", "role": "user"}], "stream": True}, <7> ) <8> assert response.status_code == 200 <9> assert "Exception while generating response stream: something bad happened" in caplog.text <10> result = await response.get_data() <11> snapshot.assert_match(result, "result.jsonlines") <12>
===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): monkeypatch.setattr( "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", - mock.Mock( - side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") - ), + mock.Mock(side_effect=filtered_response), ) response = await client.post( "/chat", json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, ) assert response.status_code == 400 result = await response.get_json() assert "Exception in /chat: The response was filtered" in caplog.text snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): monkeypatch.setattr( "approaches.retrievethenread.RetrieveThenReadApproach.run", - mock.Mock( - side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") - ), + mock.Mock(side_effect=filtered_response), ) response = await client.post( "/ask", json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, ) assert response.status_code == 400 result = await response.get_json() assert "Exception in /ask: The response was filtered" in caplog.text snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if 
isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 12=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 13=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 14=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 17=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 18=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 19=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client()
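Editor's note: one subtlety in the streaming test above is that the async chat.completions.create is replaced with a plain mock.Mock rather than an AsyncMock. That works because side_effect fires when the method is called, before anything would be awaited. A tiny standalone illustration; FakeCompletions is a stand-in class, not repo code.

from unittest import mock


class FakeCompletions:  # stand-in for client.chat.completions
    async def create(self, **kwargs):
        raise NotImplementedError


completions = FakeCompletions()
completions.create = mock.Mock(side_effect=ZeroDivisionError("something bad happened"))

try:
    completions.create(model="gpt-35-turbo")  # raises before any await happens
except ZeroDivisionError as err:
    print(f"raised synchronously: {err}")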
tests.test_app/test_chat_handle_exception_contentsafety_streaming
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<del> monkeypatch.setattr( <1>:<del> "openai.ChatCompletion.acreate", <2>:<del> mock.Mock( <3>:<del> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4>:<del> ), <5>:<del> ) <6>:<add> chat_client = client.app.config[app.CONFIG_OPENAI_CLIENT] <add> monkeypatch.setattr(chat_client.chat.completions, "create", mock.Mock(side_effect=filtered_response))
# module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_contentsafety_streaming(client, monkeypatch, snapshot, caplog): <0> monkeypatch.setattr( <1> "openai.ChatCompletion.acreate", <2> mock.Mock( <3> side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") <4> ), <5> ) <6> <7> response = await client.post( <8> "/chat", <9> json={"messages": [{"content": "How do I do something bad?", "role": "user"}], "stream": True}, <10> ) <11> assert response.status_code == 200 <12> assert "Exception while generating response stream: The response was filtered" in caplog.text <13> result = await response.get_data() <14> snapshot.assert_match(result, "result.jsonlines") <15>
===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_streaming(client, monkeypatch, snapshot, caplog): + chat_client = client.app.config[app.CONFIG_OPENAI_CLIENT] monkeypatch.setattr( + chat_client.chat.completions, "create", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) - "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) ) response = await client.post( "/chat", json={"messages": [{"content": "What is the capital of France?", "role": "user"}], "stream": True}, ) assert response.status_code == 200 assert "Exception while generating response stream: something bad happened" in caplog.text result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): monkeypatch.setattr( "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", - mock.Mock( - side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") - ), + mock.Mock(side_effect=filtered_response), ) response = await client.post( "/chat", json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, ) assert response.status_code == 400 result = await response.get_json() assert "Exception in /chat: The response was filtered" in caplog.text snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): monkeypatch.setattr( "approaches.retrievethenread.RetrieveThenReadApproach.run", - mock.Mock( - side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") - ), + mock.Mock(side_effect=filtered_response), ) response = await client.post( "/ask", json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, ) assert response.status_code == 400 result = await response.get_json() assert "Exception in /ask: The response was filtered" in caplog.text snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 6=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 8=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 
9=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 10=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 13=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 14=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 15=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 18=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 19=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) +
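Editor's note: both streaming failure tests still assert status 200 and match a result.jsonlines snapshot, because once the response has started streaming the backend can only report an error as a JSON line inside the NDJSON body. A minimal sketch of that pattern; the error envelope shape here is an assumption rather than the app's exact schema.

import json
from typing import AsyncGenerator


async def format_as_ndjson(events: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    """Serialize each event as one JSON line; report failures in-band."""
    try:
        async for event in events:
            yield json.dumps(event, ensure_ascii=False) + "\n"
    except Exception as error:
        # The 200 status and headers are already sent, so emit the error as a line
        yield json.dumps({"error": str(error)}) + "\n"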
tests.test_searchmanager/test_update_content_with_embeddings
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<add> async def mock_create_client(*args, **kwargs): <del> async def mock_create(*args, **kwargs): <2>:<add> return MockClient( <add> embeddings_client=MockEmbeddingsClient( <add> create_embedding_response=openai.types.CreateEmbeddingResponse( <del> return { <3>:<add> object="list", <del> "object": "list", <4>:<del> "data": [ <5>:<del> { <6>:<del> "object": "embedding", <7>:<add> data=[ <add> openai.types.Embedding( <add> embedding=[ <del> "embedding": [ <8>:<add> 0.0023064255, <del> 0.0023064255, <9>:<add> -0.009327292, <del> -0.009327292, <10>:<add> -0.0028842222, <del> -0.0028842222, <11>:<add> ], <add> index=0, <add> object="embedding", <add> ) <12>:<add> model="text-embedding-ada-002", <add> usage=Usage(prompt_tokens=8, total_tokens=8), <del> "index": 0, <13>:<add> ) <del> } <14>:<add> ) <del> ], <15>:<del> "model": "text-embedding-ada-002", <16>:<del> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <17>:<add> ) <del> } <18>:<del> <19>:<del> monkeypatch.setattr(openai.Embedding, "acreate", mock_create) <27>:<add> embeddings = AzureOpenAIEmbeddingService( <add> open_ai_service="x", <add>
# module: tests.test_searchmanager @pytest.mark.asyncio async def test_update_content_with_embeddings(monkeypatch, search_info): <0> async def mock_create(*args, **kwargs): <1> # From https://platform.openai.com/docs/api-reference/embeddings/create <2> return { <3> "object": "list", <4> "data": [ <5> { <6> "object": "embedding", <7> "embedding": [ <8> 0.0023064255, <9> -0.009327292, <10> -0.0028842222, <11> ], <12> "index": 0, <13> } <14> ], <15> "model": "text-embedding-ada-002", <16> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <17> } <18> <19> monkeypatch.setattr(openai.Embedding, "acreate", mock_create) <20> <21> documents_uploaded = [] <22> <23> async def mock_upload_documents(self, documents): <24> documents_uploaded.extend(documents) <25> <26> monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents) <27> <28> manager = SearchManager( <29> search_info, <30> embeddings=AzureOpenAIEmbeddingService( <31> open_ai_service="x", <32> open_ai_deployment="x", <33> open_ai_model_name="text-ada-003", <34> credential=AzureKeyCredential("test"), <35> disable_batch=True, <36> ), <37> ) <38> <39> test_io = io.BytesIO(b"test content") <40> test_io.name = "test/foo.pdf" <41> file = File(test_io) <42> <43> await manager.update_content( <44> [ <45> Section( <46> split_page=SplitPage( <47> page_num=0, <48> text="test content", <49> ), <50> content=file</s>
===========below chunk 0=========== # module: tests.test_searchmanager @pytest.mark.asyncio async def test_update_content_with_embeddings(monkeypatch, search_info): # offset: 1 category="test", ) ] ) assert len(documents_uploaded) == 1, "It should have uploaded one document" assert documents_uploaded[0]["embedding"] == [ 0.0023064255, -0.009327292, -0.0028842222, ] ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: io BytesIO(initial_bytes: bytes=...) at: io.BytesIO name: Any at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False, verbose: bool=False) at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.searchmanager SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, embeddings: Optional[OpenAIEmbeddings]=None) at: scripts.prepdocslib.searchmanager.SearchManager update_content(sections: List[Section]) at: scripts.prepdocslib.textsplitter SplitPage(page_num: int, text: str) at: tests.test_searchmanager MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse) MockClient(embeddings_client) at: tests.test_searchmanager.test_update_content_many ids = [] manager = SearchManager( search_info, ) sections = [] file = File(test_io) ===========changed ref 0=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 1=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 2=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, 
**kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 12=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 13=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 14=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 15=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 18=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 19=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) +
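Editor's note: the mock_create_client swap in this test works because the embeddings services construct their OpenAI client through an overridable factory method (create_client), giving the tests a clean seam. A stripped-down sketch of that seam; EmbeddingService and embed are illustrative names, and the real services add batching and retries.

from openai import AsyncOpenAI


class EmbeddingService:  # illustrative stand-in for the prepdocslib services
    def __init__(self, model: str):
        self.model = model

    async def create_client(self) -> AsyncOpenAI:
        # The seam: tests monkeypatch this method to return a mock client
        return AsyncOpenAI(api_key="sk-placeholder")

    async def embed(self, texts: list[str]) -> list[list[float]]:
        client = await self.create_client()
        response = await client.embeddings.create(model=self.model, input=texts)
        return [item.embedding for item in response.data]

A test can then call monkeypatch.setattr(service, "create_client", mock_create_client) and never touch the network.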
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<1>:<add> self.openai_client = openai_client <del> self.openai_host = openai_host <2>:<add> self.chatgpt_model = chatgpt_model <3>:<del> self.chatgpt_model = chatgpt_model
<s>pt_model: str, - openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI - chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): <0> self.search_client = search_client <1> self.openai_host = openai_host <2> self.chatgpt_deployment = chatgpt_deployment <3> self.chatgpt_model = chatgpt_model <4> self.embedding_deployment = embedding_deployment <5> self.embedding_model = embedding_model <6> self.sourcepage_field = sourcepage_field <7> self.content_field = content_field <8> self.query_language = query_language <9> self.query_speller = query_speller <10> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <11>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. Enclose the follow-up questions in double angle brackets. Example: <<Are there exclusions for prescriptions?>> <<Which pharmacies can be ordered from?>> <<What is the limit for over-the-counter medication?>> Do no repeat questions that have already been asked. Make sure the last question ends with ">>".""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to an Azure AI Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 6=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 12=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 13=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 14=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 15=========== # module: 
tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 18=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model
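Editor's note: with this constructor change, each approach receives a shared AsyncOpenAI client instead of reading module-level openai settings. A hedged wiring sketch of how startup code might build the approach; endpoints, keys, deployment names, and field names below are placeholders, while the keyword arguments match the fixture shown in the changed refs.

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.aio import SearchClient
from openai import AsyncAzureOpenAI

from approaches.chatreadretrieveread import ChatReadRetrieveReadApproach

openai_client = AsyncAzureOpenAI(
    azure_endpoint="https://my-openai.openai.azure.com",  # placeholder endpoint
    api_key="...",  # placeholder; token-based auth is also possible
    api_version="2023-05-15",
)
search_client = SearchClient(
    endpoint="https://my-search.search.windows.net",  # placeholder endpoint
    index_name="gptkbindex",
    credential=AzureKeyCredential("..."),
)
chat_approach = ChatReadRetrieveReadApproach(
    search_client=search_client,
    openai_client=openai_client,
    chatgpt_model="gpt-35-turbo",
    chatgpt_deployment="chat",
    embedding_deployment="embeddings",
    embedding_model="text-embedding-ada-002",
    sourcepage_field="sourcepage",
    content_field="content",
    query_language="en-us",
    query_speller="lexicon",
)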
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<3>:<add> chat_completion_response: ChatCompletion = await chat_coroutine
<add> chat_resp = chat_completion_response.model_dump()  # Convert to dict to make it JSON serializable
<del> chat_resp = dict(await chat_coroutine)
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_without_streaming(
        self,
        history: list[dict[str, str]],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> dict[str, Any]:
<0>     extra_info, chat_coroutine = await self.run_until_final_call(
<1>         history, overrides, auth_claims, should_stream=False
<2>     )
<3>     chat_resp = dict(await chat_coroutine)
<4>     chat_resp["choices"][0]["context"] = extra_info
<5>     if overrides.get("suggest_followup_questions"):
<6>         content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"])
<7>         chat_resp["choices"][0]["message"]["content"] = content
<8>         chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions
<9>     chat_resp["choices"][0]["session_state"] = session_state
<10>    return chat_resp
<11>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple extract_followup_questions(content: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.openai_client = openai_client self.chatgpt_model = chatgpt_model at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_until_final_call original_user_query = history[-1]["content"] query_text = self.get_search_query(chat_completion, original_user_query) query_text = None results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] content = "\n".join(results) messages_token_limit = self.chatgpt_token_limit - response_token_limit messages = self.get_messages_from_history( system_prompt=system_message, model_id=self.chatgpt_model, history=history, # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt. user_content=original_user_query + "\n\nSources:\n" + content, max_tokens=messages_token_limit, ) ===========unchanged ref 1=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== <s>: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top", 3) filter = self.build_filter(overrides, auth_claims) - original_user_query = history[-1]["content"] user_query_request = "Generate search query for: " + original_user_query functions = [ { "name": "search_sources", "description": "Retrieve sources from the Azure AI Search index", "parameters": { "type": "object", "properties": { "search_query": { "type": "string", "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", } }, "required": ["search_query"], }, } ] # STEP 1: Generate an optimized keyword search query based on the chat history and the last question messages = self.get_messages_from_history( system_prompt=self.query_prompt_template, model_id=self.chatgpt_model, history=history, user_content=user_query_request, max_tokens=self.chatgpt_token_limit - len(user_query_request), few_shots=self.query_prompt_few</s> ===========changed ref 1=========== <s>approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: # offset: 1 <s>gpt_token_limit - len(user_query_request), few_shots=self.query_prompt_few_shots, ) - 
- chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} - chat_completion = await openai.ChatCompletion.acreate( - **chatgpt_args, - model=self.chatgpt_model, + chat_completion: ChatCompletion = await self.openai_client.chat.completions.create( + messages=messages, # type: ignore - messages=messages, + # Azure Open AI takes the deployment name as the model name + model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, temperature=0.0, max_tokens=100, # Setting too low risks malformed JSON, setting too high may affect performance n=1, functions=functions, function_call="auto", ) query_text = self.get_search_query(chat_completion, original_user_query) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query vectors: list[VectorQuery] = [] if has_vector: + embedding = await self.openai_client</s> ===========changed ref 2=========== <s>approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: # offset: 2 <s>dings.create( + # Azure Open AI takes the deployment name as the model name + model=self.embedding_deployment if self.embedding_deployment else self.embedding_model, + input=query_text, + ) - embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} - embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) + query_vector = embedding.data[0].embedding - query_vector = embedding["data"][0]["embedding"] vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding")) # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language=self.query_language, query_speller=self.query_speller, semantic_configuration_name="default", top=top, query_caption="extractive|highlight-</s>
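The run_without_streaming change above hinges on openai>=1.0 returning pydantic models instead of plain dicts. A minimal sketch of the conversion, assuming a placeholder key and an existing message list:

from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion

client = AsyncOpenAI(api_key="<key>")  # placeholder key

async def answer(messages: list[dict]) -> dict:
    completion: ChatCompletion = await client.chat.completions.create(
        model="gpt-3.5-turbo", messages=messages
    )
    resp = completion.model_dump()  # pydantic model -> plain dict, JSON serializable
    resp["choices"][0]["context"] = {"data_points": []}  # extra keys can now be attached
    return resp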
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<18>:<add> async for event_chunk in await chat_coroutine:
<del> async for event in await chat_coroutine:
<20>:<add> event = event_chunk.model_dump()  # Convert pydantic model to dict
<22>:<add> content = event["choices"][0]["delta"].get("content")
<del> content = event["choices"][0]["delta"].get("content", "")
<23>:<add> content = content or ""  # content may either not exist in delta, or explicitly be None
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_with_streaming(
        self,
        history: list[dict[str, str]],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> AsyncGenerator[dict, None]:
<0>     extra_info, chat_coroutine = await self.run_until_final_call(
<1>         history, overrides, auth_claims, should_stream=True
<2>     )
<3>     yield {
<4>         "choices": [
<5>             {
<6>                 "delta": {"role": self.ASSISTANT},
<7>                 "context": extra_info,
<8>                 "session_state": session_state,
<9>                 "finish_reason": None,
<10>                "index": 0,
<11>            }
<12>        ],
<13>        "object": "chat.completion.chunk",
<14>    }
<15>
<16>    followup_questions_started = False
<17>    followup_content = ""
<18>    async for event in await chat_coroutine:
<19>        # "2023-07-01-preview" API version has a bug where first response has empty choices
<20>        if event["choices"]:
<21>            # if event contains << and not >>, it is start of follow-up question, truncate
<22>            content = event["choices"][0]["delta"].get("content", "")
<23>            if overrides.get("suggest_followup_questions") and "<<" in content:
<24>                followup_questions_started = True
<25>                earlier_content = content[: content.index("<<")]
<26>                if earlier_content:
<27>                    event["choices"][0]["delta"]["content"] = earlier_content
<28>                    yield event
<29>                followup_content += content[content.index("<<") :]
<30>            elif followup_questions_started:
<31>                followup_content += content
<32>            else:
<33>                yield event
<34>    if followup_content:
<35>        _, followup_questions = self.extract_followup_questions(follow</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: # offset: 1 yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": {"followup_questions": followup_questions}, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_without_streaming extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== <s>: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top", 3) filter = self.build_filter(overrides, auth_claims) - original_user_query = history[-1]["content"] user_query_request = "Generate search query for: " + original_user_query functions = [ { "name": "search_sources", "description": "Retrieve sources from the Azure AI Search index", "parameters": { "type": "object", "properties": { "search_query": { "type": "string", "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", } }, "required": ["search_query"], }, } ] # STEP 1: Generate an optimized keyword search query based on the chat history and the last question messages = self.get_messages_from_history( system_prompt=self.query_prompt_template, model_id=self.chatgpt_model, history=history, user_content=user_query_request, max_tokens=self.chatgpt_token_limit - len(user_query_request), few_shots=self.query_prompt_few</s> ===========changed ref 1=========== <s>approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: # offset: 1 <s>gpt_token_limit - len(user_query_request), few_shots=self.query_prompt_few_shots, ) - - chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} - chat_completion = await openai.ChatCompletion.acreate( - **chatgpt_args, - model=self.chatgpt_model, + chat_completion: ChatCompletion = await self.openai_client.chat.completions.create( + messages=messages, # type: ignore - messages=messages, + # Azure Open AI takes the deployment name as the model name + model=self.chatgpt_deployment if 
self.chatgpt_deployment else self.chatgpt_model, temperature=0.0, max_tokens=100, # Setting too low risks malformed JSON, setting too high may affect performance n=1, functions=functions, function_call="auto", ) query_text = self.get_search_query(chat_completion, original_user_query) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query vectors: list[VectorQuery] = [] if has_vector: + embedding = await self.openai_client</s> ===========changed ref 2=========== <s>approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, + ) -> tuple[dict[str, Any], Coroutine[Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]]]: - ) -> tuple: # offset: 2 <s>dings.create( + # Azure Open AI takes the deployment name as the model name + model=self.embedding_deployment if self.embedding_deployment else self.embedding_model, + input=query_text, + ) - embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} - embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) + query_vector = embedding.data[0].embedding - query_vector = embedding["data"][0]["embedding"] vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding")) # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language=self.query_language, query_speller=self.query_speller, semantic_configuration_name="default", top=top, query_caption="extractive|highlight-</s>
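In the streaming path above, each chunk is likewise a pydantic ChatCompletionChunk, and delta.content can be absent or explicitly None. A sketch of the consumption loop under those assumptions, with a placeholder key and model name:

from openai import AsyncOpenAI

client = AsyncOpenAI(api_key="<key>")  # placeholder key

async def stream_answer(messages: list[dict]):
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo", messages=messages, stream=True
    )
    async for event_chunk in stream:
        event = event_chunk.model_dump()  # chunk model -> dict
        if event["choices"]:  # some API versions emit an empty first chunk
            content = event["choices"][0]["delta"].get("content") or ""
            yield content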
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<3>:<del> # Workaround for: https://github.com/openai/openai-python/issues/371
<4>:<del> async with aiohttp.ClientSession() as s:
<5>:<del> openai.aiosession.set(s)
<6>:<add> return await self.run_without_streaming(messages, overrides, auth_claims, session_state)
<del> response = await self.run_without_streaming(messages, overrides, auth_claims, session_state)
<7>:<del> return response
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run(
        self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {}
    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
<0>     overrides = context.get("overrides", {})
<1>     auth_claims = context.get("auth_claims", {})
<2>     if stream is False:
<3>         # Workaround for: https://github.com/openai/openai-python/issues/371
<4>         async with aiohttp.ClientSession() as s:
<5>             openai.aiosession.set(s)
<6>             response = await self.run_without_streaming(messages, overrides, auth_claims, session_state)
<7>         return response
<8>     else:
<9>         return self.run_with_streaming(messages, overrides, auth_claims, session_state)
<10>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_with_streaming event = event_chunk.model_dump() # Convert pydantic model to dict at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_completion_response: ChatCompletion = await chat_coroutine + chat_resp = chat_completion_response.model_dump() # Convert to dict to make it JSON serializable - chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info if overrides.get("suggest_followup_questions"): content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) chat_resp["choices"][0]["message"]["content"] = content chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== <s>pt_model: str, - openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI - chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client + self.openai_client = openai_client - self.openai_host = openai_host + self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment - self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": extra_info, "session_state": session_state, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } followup_questions_started = False followup_content = "" + async for event_chunk in await chat_coroutine: - async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices + event = event_chunk.model_dump() # Convert pydantic model to dict if event["choices"]: # if event contains << and not >>, it is start of follow-up question, truncate + content = event["choices"][0]["delta"].get("content") - content = event["choices"][0]["delta"].get("content", "") + content = content or "" # content may either not exist in delta, or explicitly be None if overrides.get("suggest_followup_questions") and "<<" in content: 
followup_questions_started = True earlier_content = content[: content.index("<<")] if earlier_content: event["choices"][0]["delta"]["content"] = earlier_content yield event followup_content += content[content.index("<<") :] elif followup_questions_</s> ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: # offset: 1 <s>content yield event followup_content += content[content.index("<<") :] elif followup_questions_started: followup_content += content else: yield event if followup_content: _, followup_questions = self.extract_followup_questions(followup_content) yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": {"followup_questions": followup_questions}, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 6=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 8=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 9=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 10=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 11=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 12=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) +
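The deleted aiohttp workaround existed because openai<1.0 relied on a module-global session; the v1 AsyncOpenAI client owns its own httpx session, so no per-request session management is needed. A sketch of the simplified lifecycle, with a placeholder key:

from openai import AsyncOpenAI

client = AsyncOpenAI(api_key="<key>")  # manages its own httpx.AsyncClient internally

async def run_once(messages: list[dict]) -> dict:
    completion = await client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return completion.model_dump()
# Optional explicit cleanup at shutdown: await client.close()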
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<9>:<add> total_token_count = message_builder.count_tokens_for_message(dict(message_builder.messages[-1]))  # type: ignore
<del> total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1])
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def get_messages_from_history(
        self,
        system_prompt: str,
        model_id: str,
        history: list[dict[str, str]],
        user_content: str,
        max_tokens: int,
        few_shots=[],
+     ) -> list[ChatCompletionMessageParam]:
-     ) -> list:
<0>     message_builder = MessageBuilder(system_prompt, model_id)
<1>
<2>     # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
<3>     for shot in reversed(few_shots):
<4>         message_builder.insert_message(shot.get("role"), shot.get("content"))
<5>
<6>     append_index = len(few_shots) + 1
<7>
<8>     message_builder.insert_message(self.USER, user_content, index=append_index)
<9>     total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1])
<10>
<11>    newest_to_oldest = list(reversed(history[:-1]))
<12>    for message in newest_to_oldest:
<13>        potential_message_count = message_builder.count_tokens_for_message(message)
<14>        if (total_token_count + potential_message_count) > max_tokens:
<15>            logging.debug("Reached max tokens of %d, history will be truncated", max_tokens)
<16>            break
<17>        message_builder.insert_message(message["role"], message["content"], index=append_index)
<18>        total_token_count += potential_message_count
<19>    return message_builder.messages
<20>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" extract_followup_questions(content: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_with_streaming followup_content += content[content.index("<<") :] followup_content = "" followup_content += content at: approaches.approach.Approach run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) if stream is False: - # Workaround for: https://github.com/openai/openai-python/issues/371 - async with aiohttp.ClientSession() as s: - openai.aiosession.set(s) + return await self.run_without_streaming(messages, overrides, auth_claims, session_state) - response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) - return response else: return self.run_with_streaming(messages, overrides, auth_claims, session_state) ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_completion_response: ChatCompletion = await chat_coroutine + chat_resp = chat_completion_response.model_dump() # Convert to dict to make it JSON serializable - chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info if overrides.get("suggest_followup_questions"): content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) chat_resp["choices"][0]["message"]["content"] = content chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 2=========== <s>pt_model: str, - openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI - chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client + self.openai_client = openai_client - self.openai_host = openai_host + self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment - self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class 
ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": extra_info, "session_state": session_state, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } followup_questions_started = False followup_content = "" + async for event_chunk in await chat_coroutine: - async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices + event = event_chunk.model_dump() # Convert pydantic model to dict if event["choices"]: # if event contains << and not >>, it is start of follow-up question, truncate + content = event["choices"][0]["delta"].get("content") - content = event["choices"][0]["delta"].get("content", "") + content = content or "" # content may either not exist in delta, or explicitly be None if overrides.get("suggest_followup_questions") and "<<" in content: followup_questions_started = True earlier_content = content[: content.index("<<")] if earlier_content: event["choices"][0]["delta"]["content"] = earlier_content yield event followup_content += content[content.index("<<") :] elif followup_questions_</s> ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: # offset: 1 <s>content yield event followup_content += content[content.index("<<") :] elif followup_questions_started: followup_content += content else: yield event if followup_content: _, followup_questions = self.extract_followup_questions(followup_content) yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": {"followup_questions": followup_questions}, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError -
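The dict(...) cast in the hunk above is presumably there to satisfy the type checker now that messages are TypedDict-based ChatCompletionMessageParam objects. The token accounting itself is unchanged; a rough sketch of a per-message counter using tiktoken and the common 4-token-per-message overhead heuristic (the real helper may count differently):

import tiktoken

def count_tokens_for_message(model: str, message: dict) -> int:
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 4  # approximate per-message formatting overhead
    for value in message.values():
        num_tokens += len(encoding.encode(str(value)))
    return num_tokens

print(count_tokens_for_message("gpt-3.5-turbo", {"role": "user", "content": "hi"}))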
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_search_query
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<add> response_message = chat_completion.choices[0].message
<del> response_message = chat_completion["choices"][0]["message"]
<1>:<add> if function_call := response_message.function_call:
<del> if function_call := response_message.get("function_call"):
<2>:<add> if function_call.name == "search_sources":
<del> if function_call["name"] == "search_sources":
<3>:<add> arg = json.loads(function_call.arguments)
<del> arg = json.loads(function_call["arguments"])
<7>:<add> elif query_text := response_message.content:
<del> elif query_text := response_message.get("content"):
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
+     def get_search_query(self, chat_completion: ChatCompletion, user_query: str):
-     def get_search_query(self, chat_completion: dict[str, Any], user_query: str):
<0>     response_message = chat_completion["choices"][0]["message"]
<1>     if function_call := response_message.get("function_call"):
<2>         if function_call["name"] == "search_sources":
<3>             arg = json.loads(function_call["arguments"])
<4>             search_query = arg.get("search_query", self.NO_RESPONSE)
<5>             if search_query != self.NO_RESPONSE:
<6>                 return search_query
<7>     elif query_text := response_message.get("content"):
<8>         if query_text.strip() != self.NO_RESPONSE:
<9>             return query_text
<10>    return user_query
<11>
===========unchanged ref 0=========== at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) if stream is False: - # Workaround for: https://github.com/openai/openai-python/issues/371 - async with aiohttp.ClientSession() as s: - openai.aiosession.set(s) + return await self.run_without_streaming(messages, overrides, auth_claims, session_state) - response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) - return response else: return self.run_with_streaming(messages, overrides, auth_claims, session_state) ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_completion_response: ChatCompletion = await chat_coroutine + chat_resp = chat_completion_response.model_dump() # Convert to dict to make it JSON serializable - chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info if overrides.get("suggest_followup_questions"): content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) chat_resp["choices"][0]["message"]["content"] = content chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def get_messages_from_history( self, system_prompt: str, model_id: str, history: list[dict[str, str]], user_content: str, max_tokens: int, few_shots=[], + ) -> list[ChatCompletionMessageParam]: - ) -> list: message_builder = MessageBuilder(system_prompt, model_id) # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. 
for shot in reversed(few_shots): message_builder.insert_message(shot.get("role"), shot.get("content")) append_index = len(few_shots) + 1 message_builder.insert_message(self.USER, user_content, index=append_index) + total_token_count = message_builder.count_tokens_for_message(dict(message_builder.messages[-1])) # type: ignore - total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1]) newest_to_oldest = list(reversed(history[:-1])) for message in newest_to_oldest: potential_message_count = message_builder.count_tokens_for_message(message) if (total_token_count + potential_message_count) > max_tokens: logging.debug("Reached max tokens of %d, history will be truncated", max_tokens) break message_builder.insert_message(message["role"], message["content"], index=append_index) total_token_count += potential_message_count return message_builder.messages ===========changed ref 3=========== <s>pt_model: str, - openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI - chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client + self.openai_client = openai_client - self.openai_host = openai_host + self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment - self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { "choices": [ { "delta": {"role": self.ASSISTANT}, "context": extra_info, "session_state": session_state, "finish_reason": None, "index": 0, } ], "object": "chat.completion.chunk", } followup_questions_started = False followup_content = "" + async for event_chunk in await chat_coroutine: - async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices + event = event_chunk.model_dump() # Convert pydantic model to dict if event["choices"]: # if event contains << and not >>, it is start of follow-up question, truncate + content = event["choices"][0]["delta"].get("content") - content = event["choices"][0]["delta"].get("content", "") + content = content or "" # content may either not exist in delta, or explicitly be None if overrides.get("suggest_followup_questions") and "<<" in content: followup_questions_started = True earlier_content = content[: content.index("<<")] if earlier_content: event["choices"][0]["delta"]["content"] = earlier_content yield event followup_content += content[content.index("<<") :] elif followup_questions_</s>
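get_search_query now reads typed attributes instead of dict keys, as the hunk above shows. A self-contained sketch of the same extraction logic, where NO_RESPONSE is an assumed sentinel string (the real constant lives on the approach class):

import json
from openai.types.chat import ChatCompletion

NO_RESPONSE = "0"  # assumed sentinel value for illustration

def get_search_query(chat_completion: ChatCompletion, user_query: str) -> str:
    response_message = chat_completion.choices[0].message
    if function_call := response_message.function_call:
        if function_call.name == "search_sources":
            arg = json.loads(function_call.arguments)
            search_query = arg.get("search_query", NO_RESPONSE)
            if search_query != NO_RESPONSE:
                return search_query
    elif query_text := response_message.content:
        if query_text.strip() != NO_RESPONSE:
            return query_text
    return user_query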
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<1>:<add> self.openai_client = openai_client
<del> self.openai_host = openai_host
<2>:<del> self.chatgpt_deployment = chatgpt_deployment
<5>:<add> self.chatgpt_deployment = chatgpt_deployment
<s>ai_host: str,
        chatgpt_deployment: Optional[str],  # Not needed for non-Azure OpenAI
+         embedding_model: str,
-         chatgpt_model: str,
        embedding_deployment: Optional[str],  # Not needed for non-Azure OpenAI or for retrieval_mode="text"
-         embedding_model: str,
        sourcepage_field: str,
        content_field: str,
        query_language: str,
        query_speller: str,
    ):
<0>     self.search_client = search_client
<1>     self.openai_host = openai_host
<2>     self.chatgpt_deployment = chatgpt_deployment
<3>     self.chatgpt_model = chatgpt_model
<4>     self.embedding_model = embedding_model
<5>     self.embedding_deployment = embedding_deployment
<6>     self.sourcepage_field = sourcepage_field
<7>     self.content_field = content_field
<8>     self.query_language = query_language
<9>     self.query_speller = query_speller
<10>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 3=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 6=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 10=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 11=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 12=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 13=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 14=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 15=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 
16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 18=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 19=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 20=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client() ===========changed ref 21=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "deployment_id": self.open_ai_deployment, - "api_type": self.get_api_type(), - "api_key": await self.wrap_credential(), - "api_version": "2023-05-15", - "api_base": f"https://{self.open_ai_service}.openai.azure.com", - } - ===========changed ref 22=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(chat_approach): - def test_extract_followup_questions_no_followup(): - chat_approach = ChatReadRetrieveReadApproach( - None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" - ) - content = "Here is answer to your question." pre_content, followup_questions = chat_approach.extract_followup_questions(content) assert pre_content == "Here is answer to your question." assert followup_questions == []
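The constructor change above swaps the openai_host string for an injected client, so one shared AsyncOpenAI (or AsyncAzureOpenAI) instance serves every approach. A trimmed sketch of the dependency-injection pattern (the real class takes more collaborators):

from typing import Optional
from openai import AsyncOpenAI

class RetrieveThenReadApproach:  # abbreviated for illustration
    def __init__(self, openai_client: AsyncOpenAI, chatgpt_model: str,
                 chatgpt_deployment: Optional[str] = None):
        self.openai_client = openai_client  # replaces module-level openai.* calls
        self.chatgpt_model = chatgpt_model
        self.chatgpt_deployment = chatgpt_deployment  # only set for Azure OpenAI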
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<8>:<del>
<12>:<add> embedding = await self.openai_client.embeddings.create(
<add> # Azure Open AI takes the deployment name as the model name
<add> model=self.embedding_deployment if self.embedding_deployment else self.embedding_model,
<add> input=q,
<add> )
<del> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
<13>:<del> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q)
<14>:<add> query_vector = embedding.data[0].embedding
<del> query_vector = embedding["data"][0]["embedding"]
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[dict],
        stream: bool = False,  # Stream is not used in this approach
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
<0>     q = messages[-1]["content"]
<1>     overrides = context.get("overrides", {})
<2>     auth_claims = context.get("auth_claims", {})
<3>     has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
<4>     has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
<5>     use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
<6>     top = overrides.get("top", 3)
<7>     filter = self.build_filter(overrides, auth_claims)
<8>
<9>     # If retrieval mode includes vectors, compute an embedding for the query
<10>    vectors: list[VectorQuery] = []
<11>    if has_vector:
<12>        embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
<13>        embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q)
<14>        query_vector = embedding["data"][0]["embedding"]
<15>        vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding"))
<16>
<17>    # Only keep the text query if the retrieval mode uses text, otherwise drop it
<18>    query_text = q if has_text else ""
<19>
<20>    # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
<21>    if overrides.get("semantic_ranker") and has_text:
<22>        r = await self.search_client.search(
<23>            query_text,
<24>            filter=filter, </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 query_language=self.query_language, query_speller=self.query_speller, semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector_queries=vectors, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector_queries=vectors, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.insert_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.insert_message("assistant", self.answer) message_builder.insert_message("user", self.question) messages = message_builder.messages chatgpt</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> message_builder.insert_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["context"] = extra_info chat_completion.choices[0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. 
In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder insert_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages: list[ChatCompletionMessageParam] = [ ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) ] at: text nonewlines(s: str) -> str at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
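The retrieval change above illustrates the v1 embeddings call: for Azure, the deployment name goes in the model parameter, and the response exposes .data[0].embedding. A minimal sketch with placeholder endpoint, key, and deployment name:

from openai import AsyncAzureOpenAI

client = AsyncAzureOpenAI(
    azure_endpoint="https://myservice.openai.azure.com",  # placeholder
    api_key="<key>",
    api_version="2023-05-15",
)

async def embed_query(q: str) -> list[float]:
    embedding = await client.embeddings.create(model="embedding", input=q)  # "embedding" = deployment name
    return embedding.data[0].embedding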
app.backend.app/error_dict
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<0>:<add> if isinstance(error, APIError) and error.code == "content_filter":
<del> if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter":
# module: app.backend.app
def error_dict(error: Exception) -> dict:
<0>    if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter":
<1>        return {"error": ERROR_MESSAGE_FILTER}
<2>    return {"error": ERROR_MESSAGE.format(error_type=type(error))}
<3>
===========unchanged ref 0=========== at: app.backend.app ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" at: openai.error InvalidRequestError(message, param, code=None, http_body=None, http_status=None, json_body=None, headers=None) ===========changed ref 0=========== # module: app.backend.app - CONFIG_OPENAI_TOKEN = "openai_token" - CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + CONFIG_OPENAI_CLIENT = "openai_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 4=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 5=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 7=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 9=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 10=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 11=========== # module: 
scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 13=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 14=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 15=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 16=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 19=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 20=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) + ===========changed ref 21=========== # module: tests.conftest @pytest_asyncio.fixture() async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update({"TESTING": True}) + mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) + mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) - yield test_app.test_client()
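A minimal sketch of how the content-filter branch in error_dict can be exercised under the v1 SDK, reusing the fake_response() helper pattern from the tests above. The body shape, and the assumption that openai v1 copies "code" from the error body onto the exception, are not confirmed by this diff.

import openai
from httpx import Request, Response

def fake_response(http_code):
    return Response(http_code, request=Request(method="get", url="https://foo.bar/"))

# BadRequestError subclasses APIError; v1 is assumed to lift "code" out of the body.
filter_error = openai.BadRequestError(
    message="The response was filtered",
    response=fake_response(400),
    body={"code": "content_filter"},
)

assert isinstance(filter_error, openai.APIError)
assert filter_error.code == "content_filter"
# error_dict(filter_error) should therefore return {"error": ERROR_MESSAGE_FILTER}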
app.backend.app/error_response
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<1>:<add> if isinstance(error, APIError) and error.code == "content_filter": <del> if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter":
# module: app.backend.app def error_response(error: Exception, route: str, status_code: int = 500): <0> logging.exception("Exception in %s: %s", route, error) <1> if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": <2> status_code = 400 <3> return jsonify(error_dict(error)), status_code <4>
===========unchanged ref 0=========== at: app.backend.app error_dict(error: Exception) -> dict at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai.error InvalidRequestError(message, param, code=None, http_body=None, http_status=None, json_body=None, headers=None) ===========changed ref 0=========== # module: app.backend.app def error_dict(error: Exception) -> dict: + if isinstance(error, APIError) and error.code == "content_filter": - if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": return {"error": ERROR_MESSAGE_FILTER} return {"error": ERROR_MESSAGE.format(error_type=type(error))} ===========changed ref 1=========== # module: app.backend.app - CONFIG_OPENAI_TOKEN = "openai_token" - CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + CONFIG_OPENAI_CLIENT = "openai_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 5=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 6=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 7=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 8=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 9=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 10=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 11=========== # module: 
tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 12=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 14=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 15=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 16=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + ===========changed ref 17=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } - ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncAzureOpenAI( + azure_endpoint=f"https://{self.open_ai_service}.openai.azure.com", + azure_deployment=self.open_ai_deployment, + api_key=await self.wrap_credential(), + api_version="2023-05-15", + ) + ===========changed ref 20=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) - self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] + ] self.model = chatgpt_model ===========changed ref 21=========== # module: tests.test_chatapproach + @pytest.fixture + def chat_approach(): + return ChatReadRetrieveReadApproach( + search_client=None, + openai_client=None, + chatgpt_model="gpt-35-turbo", + chatgpt_deployment="chat", + embedding_deployment="embeddings", + embedding_model="text-", + sourcepage_field="", + content_field="", + query_language="en-us", + query_speller="lexicon", + ) +
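The same content-filter check also drives the HTTP status code in error_response. A distilled sketch of that selection logic (pick_status_code is an illustrative name, not part of the app):

from openai import APIError

def pick_status_code(error: Exception, status_code: int = 500) -> int:
    # Content-filter violations are client errors; everything else keeps
    # the caller-supplied code (500 by default).
    if isinstance(error, APIError) and getattr(error, "code", None) == "content_filter":
        return 400
    return status_code

assert pick_status_code(ValueError("boom")) == 500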
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<8>:<del> # Workaround for: https://github.com/openai/openai-python/issues/371 <9>:<del> async with aiohttp.ClientSession() as s: <10>:<del> openai.aiosession.set(s) <11>:<add> r = await approach.run( <del> r = await approach.run( <12>:<add> request_json["messages"], context=context, session_state=request_json.get("session_state") <del> request_json["messages"], context=context, session_state=request_json.get("session_state") <13>:<add> ) <del> )
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_ASK_APPROACH] <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await approach.run( <12> request_json["messages"], context=context, session_state=request_json.get("session_state") <13> ) <14> return jsonify(r) <15> except Exception as error: <16> return error_response(error, "/ask") <17>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") error_response(error: Exception, route: str, status_code: int=500) at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app def error_response(error: Exception, route: str, status_code: int = 500): logging.exception("Exception in %s: %s", route, error) + if isinstance(error, APIError) and error.code == "content_filter": - if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": status_code = 400 return jsonify(error_dict(error)), status_code ===========changed ref 1=========== # module: app.backend.app def error_dict(error: Exception) -> dict: + if isinstance(error, APIError) and error.code == "content_filter": - if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": return {"error": ERROR_MESSAGE_FILTER} return {"error": ERROR_MESSAGE.format(error_type=type(error))} ===========changed ref 2=========== # module: app.backend.app - CONFIG_OPENAI_TOKEN = "openai_token" - CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + CONFIG_OPENAI_CLIENT = "openai_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. 
Error type: {error_type} """ ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 5=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 6=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 7=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 8=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 9=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 10=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 11=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 12=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 13=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 14=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 15=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 16=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 17=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) + 
===========changed ref 18=========== # module: tests.test_prepdocs + class RateLimitMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.RateLimitError( + message="Rate limited on the OpenAI embeddings API", response=fake_response(409), body=None + ) + ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } -
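With the aiohttp workaround removed, the /ask route can be driven directly through Quart's test app, mirroring the conftest fixture shown above. A hedged sketch; the question text and the overrides key are illustrative, and the route still needs the environment variables that setup_clients reads:

import asyncio
from app import create_app  # the backend module shown in this document

async def ask_once():
    quart_app = create_app()
    async with quart_app.test_app() as test_app:  # triggers before_app_serving, i.e. setup_clients
        client = test_app.test_client()
        response = await client.post(
            "/ask",
            json={
                "messages": [{"role": "user", "content": "What is included in my plan?"}],
                "context": {"overrides": {"top": 3}},
            },
        )
        print(response.status_code, await response.get_json())

# asyncio.run(ask_once())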
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<11>:<add> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") if OPENAI_HOST == "azure" else None <del> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <12>:<add> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") if OPENAI_HOST == "azure" else None <del> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT")
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <5> # Shared by all OpenAI deployments <6> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") <7> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") <9> # Used with Azure OpenAI deployments <10> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <11> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <12> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <13> # Used only with non-Azure OpenAI deployments <14> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") <15> OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION") <16> AZURE_USE_AUTHENTICATION = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" <17> AZURE_SERVER_APP_ID = os.getenv("AZURE_SERVER_APP_ID") <18> AZURE_SERVER_APP_SECRET = os.getenv("AZURE_SERVER_APP_SECRET") <19> AZURE_CLIENT_APP_ID = os.getenv("AZURE_CLIENT_APP_ID")</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 TOKEN_CACHE_PATH = os.getenv("TOKEN_CACHE_PATH") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") AZURE_SEARCH_QUERY_LANGUAGE = os.getenv("AZURE_SEARCH_QUERY_LANGUAGE", "en-us") AZURE_SEARCH_QUERY_SPELLER = os.getenv("AZURE_SEARCH_QUERY_SPELLER", "lexicon") # Use the current user identity to authenticate with Azure OpenAI, AI Search and Blob Storage (no secrets needed, # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) # Set up authentication helper auth_helper = AuthenticationHelper( use_authentication=AZURE_USE_AUTHENTICATION, server_app_id=AZURE_SERVER_APP_ID, server_app_secret=AZURE_SERVER_APP_SECRET, client_app_id=AZURE_CLIENT_APP_ID, tenant_id=AZURE_TENANT_ID, token_cache_path=TOKEN_CACHE_PATH, ) # Set up clients for AI Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s> blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api_type = "azure_ad" openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-07-01-preview" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token else: openai.api_type = "openai" openai.api_key = OPENAI_API_KEY openai.organization = OPENAI_ORGANIZATION current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_SEARCH_CLIENT] = search_client current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client current_app.config[CONFIG_AUTH_CLIENT] = auth_helper # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACH] = RetrieveThenReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, </s> ===========below chunk 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 3 <s>_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, AZURE_SEARCH_QUERY_LANGUAGE, AZURE_SEARCH_QUERY_SPELLER, ) current_app.config[CONFIG_CHAT_APPROACH] = ChatReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, 
OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, AZURE_SEARCH_QUERY_LANGUAGE, AZURE_SEARCH_QUERY_SPELLER, ) ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], chatgpt_model: str, embedding_deployment: Optional[str], embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], chatgpt_model: str, embedding_deployment: Optional[str], embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) at: openai api_key = os.environ.get("OPENAI_API_KEY") organization = os.environ.get("OPENAI_ORGANIZATION") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") ===========unchanged ref 1=========== api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
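After the v1 upgrade, the module-level openai.api_* globals and the token-refresh hook give way to one shared async client stored in app config (CONFIG_OPENAI_CLIENT). A hedged sketch of the Azure variant; the api_version shown and the get_bearer_token_provider wiring are assumptions, not taken from this diff:

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncAzureOpenAI

credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True)
token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")

openai_client = AsyncAzureOpenAI(
    azure_endpoint="https://<AZURE_OPENAI_SERVICE>.openai.azure.com",  # placeholder
    api_version="2023-07-01-preview",
    azure_ad_token_provider=token_provider,  # tokens refresh per request; no before_request hook needed
)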
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
aa02563ff18ce4f5f0cca15eaa59eb6155672f8e
Upgrade OpenAI SDK to v1 (#1017)
<7>:<add> # This tracks HTTP requests made by httpx/openai: <add> HTTPXClientInstrumentor().instrument()
# module: app.backend.app def create_app(): <0> app = Quart(__name__) <1> app.register_blueprint(bp) <2> <3> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <4> configure_azure_monitor() <5> # This tracks HTTP requests made by aiohttp: <6> AioHttpClientInstrumentor().instrument() <7> # This middleware tracks app route requests: <8> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[method-assign] <9> <10> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels <11> default_level = "INFO" # In development, log more verbosely <12> if os.getenv("WEBSITE_HOSTNAME"): # In production, don't log as heavily <13> default_level = "WARNING" <14> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", default_level)) <15> <16> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <17> app.logger.info("CORS enabled for %s", allowed_origin) <18> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"]) <19> return app <20>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: logging basicConfig(*, filename: Optional[StrPath]=..., filemode: str=..., format: str=..., datefmt: Optional[str]=..., style: str=..., level: Optional[_Level]=..., stream: Optional[IO[str]]=..., handlers: Optional[Iterable[Handler]]=...) -> None at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.app - @bp.before_request - async def ensure_openai_token(): - if openai.api_type != "azure_ad": - return - openai_token = current_app.config[CONFIG_OPENAI_TOKEN] - if openai_token.expires_on < time.time() + 60: - openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token( - "https://cognitiveservices.azure.com/.default" - ) - current_app.config[CONFIG_OPENAI_TOKEN] = openai_token - openai.api_key = openai_token.token - ===========changed ref 1=========== # module: app.backend.app def error_response(error: Exception, route: str, status_code: int = 500): logging.exception("Exception in %s: %s", route, error) + if isinstance(error, APIError) and error.code == "content_filter": - if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": status_code = 400 return jsonify(error_dict(error)), status_code ===========changed ref 2=========== # module: app.backend.app def error_dict(error: Exception) -> dict: + if isinstance(error, APIError) and error.code == "content_filter": - if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": return {"error": ERROR_MESSAGE_FILTER} return {"error": ERROR_MESSAGE.format(error_type=type(error))} ===========changed ref 3=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: approach = current_app.config[CONFIG_ASK_APPROACH] - # Workaround for: https://github.com/openai/openai-python/issues/371 - async with aiohttp.ClientSession() as s: - openai.aiosession.set(s) + r = await approach.run( - r = await approach.run( + request_json["messages"], context=context, session_state=request_json.get("session_state") - request_json["messages"], context=context, session_state=request_json.get("session_state") + ) - ) return jsonify(r) except Exception as error: return error_response(error, "/ask") ===========changed ref 4=========== # module: app.backend.app - CONFIG_OPENAI_TOKEN = "openai_token" - CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + CONFIG_OPENAI_CLIENT = "openai_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. 
Error type: {error_type} """ ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddings(ABC): - def create_embedding_arguments(self) -> dict[str, Any]: - raise NotImplementedError - ===========changed ref 7=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 8=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + return self.create_embedding_response + ===========changed ref 9=========== # module: tests.test_searchmanager + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 10=========== # module: tests.test_prepdocs + class MockClient: + def __init__(self, embeddings_client): + self.embeddings = embeddings_client + ===========changed ref 11=========== # module: tests.test_searchmanager + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 12=========== # module: tests.test_prepdocs + class MockEmbeddingsClient: + def __init__(self, create_embedding_response: openai.types.CreateEmbeddingResponse): + self.create_embedding_response = create_embedding_response + ===========changed ref 13=========== # module: tests.test_prepdocs + def create_rate_limit_client(*args, **kwargs): + return MockClient(embeddings_client=RateLimitMockEmbeddingsClient()) + ===========changed ref 14=========== # module: tests.test_prepdocs + def create_auth_error_limit_client(*args, **kwargs): + return MockClient(embeddings_client=AuthenticationErrorMockEmbeddingsClient()) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class AzureOpenAIEmbeddingService(OpenAIEmbeddings): - def get_api_type(self) -> str: - return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" - ===========changed ref 17=========== # module: tests.test_app + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 18=========== # module: tests.test_prepdocs + def fake_response(http_code): + return Response(http_code, request=Request(method="get", url="https://foo.bar/")) + ===========changed ref 19=========== # module: tests.test_prepdocs + class AuthenticationErrorMockEmbeddingsClient: + def create(self, *args, **kwargs) -> openai.types.CreateEmbeddingResponse: + raise openai.AuthenticationError(message="Bad things happened.", response=fake_response(403), body=None) +
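Since the v1 SDK talks to OpenAI over httpx rather than aiohttp, tracing needs both instrumentors. A minimal sketch of that telemetry setup in isolation:

from azure.monitor.opentelemetry import configure_azure_monitor
from opentelemetry.instrumentation.aiohttp_client import AioHttpClientInstrumentor
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor

configure_azure_monitor()                  # reads APPLICATIONINSIGHTS_CONNECTION_STRING
AioHttpClientInstrumentor().instrument()   # outgoing aiohttp requests
HTTPXClientInstrumentor().instrument()     # outgoing httpx requests, i.e. openai v1 calls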
scripts.prepdocslib.textsplitter/TextSplitter.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<6>:<add> self.has_image_embeddings = has_image_embeddings
# module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): <0> self.sentence_endings = [".", "!", "?"] <1> self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] <2> self.max_section_length = 1000 <3> self.sentence_search_limit = 100 <4> self.section_overlap = 100 <5> self.verbose = verbose <6>
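Quick usage sketch of the splitter with the new flag, using the Page type from scripts.prepdocslib.pdfparser referenced elsewhere in this document; the sample text is illustrative:

from scripts.prepdocslib.pdfparser import Page
from scripts.prepdocslib.textsplitter import TextSplitter

splitter = TextSplitter(has_image_embeddings=False, verbose=True)
pages = [Page(page_num=0, offset=0, text="A fairly long sentence about benefits. " * 100)]
for split in splitter.split_pages(pages):
    # Sections of at most ~1000 characters, each overlapping the next by ~100.
    print(split.page_num, len(split.text))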
scripts.prepdocslib.textsplitter/TextSplitter.split_pages
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<0>:<add> # Chunking is disabled when using GPT4V. To be updated in the future. <add> if self.has_image_embeddings: <add> for i, page in enumerate(pages): <add> yield SplitPage(page_num=i, text=page.text) <add>
# module: scripts.prepdocslib.textsplitter class TextSplitter: def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: <0> def find_page(offset): <1> num_pages = len(pages) <2> for i in range(num_pages - 1): <3> if offset >= pages[i].offset and offset < pages[i + 1].offset: <4> return pages[i].page_num <5> return pages[num_pages - 1].page_num <6> <7> all_text = "".join(page.text for page in pages) <8> length = len(all_text) <9> start = 0 <10> end = length <11> while start + self.section_overlap < length: <12> last_word = -1 <13> end = start + self.max_section_length <14> <15> if end > length: <16> end = length <17> else: <18> # Try to find the end of the sentence <19> while ( <20> end < length <21> and (end - start - self.max_section_length) < self.sentence_search_limit <22> and all_text[end] not in self.sentence_endings <23> ): <24> if all_text[end] in self.word_breaks: <25> last_word = end <26> end += 1 <27> if end < length and all_text[end] not in self.sentence_endings and last_word > 0: <28> end = last_word # Fall back to at least keeping a whole word <29> if end < length: <30> end += 1 <31> <32> # Try to find the start of the sentence or at least a whole word boundary <33> last_word = -1 <34> while ( <35> start > 0 <36> and start > end - self.max_section_length - 2 * self.sentence_search_limit <37> and all_text[start] not in self.sentence_endings <38> ): <39> if all_text[start] in self.word_breaks: <40> last_word = start <41> start -= 1</s>
===========below chunk 0=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield SplitPage(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping if self.verbose: print( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield SplitPage(page_num=find_page(start), text=all_text[start:end]) ===========unchanged ref 0=========== at: scripts.prepdocslib.pdfparser Page(page_num: int, offset: int, text: str) at: scripts.prepdocslib.pdfparser.Page.__init__ self.page_num = page_num self.offset = offset self.text = text at: scripts.prepdocslib.textsplitter SplitPage(page_num: int, text: str) at: scripts.prepdocslib.textsplitter.TextSplitter.__init__ self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.has_image_embeddings = has_image_embeddings at: typing List = _alias(list, 1, inst=False, name='List') Generator = _alias(collections.abc.Generator, 3) ===========changed ref 0=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.verbose = verbose + self.has_image_embeddings = has_image_embeddings
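The heart of the non-GPT4V path is the boundary search: cut at max_section_length, then scan forward up to sentence_search_limit characters for a sentence ending, falling back to the last word break. A distilled, standalone sketch of that forward scan (find_cut is an illustrative name; constants mirror the class defaults):

SENTENCE_ENDINGS = [".", "!", "?"]
WORD_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"]

def find_cut(text: str, start: int, max_len: int = 1000, search_limit: int = 100) -> int:
    end, last_word = start + max_len, -1
    if end >= len(text):
        return len(text)
    while end < len(text) and (end - start - max_len) < search_limit and text[end] not in SENTENCE_ENDINGS:
        if text[end] in WORD_BREAKS:
            last_word = end
        end += 1
    if end < len(text) and text[end] not in SENTENCE_ENDINGS and last_word > 0:
        end = last_word  # fall back to a whole-word boundary
    return end + 1 if end < len(text) else end

print(find_cut("Sentence one. " * 100, 0))  # lands just past a sentence ending near position 1000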
scripts.prepdocslib.blobmanager/BlobManager.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<3>:<add> self.store_page_images = store_page_images <4>:<add> self.user_delegation_key: Optional[UserDelegationKey] = None
# module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): <0> self.endpoint = endpoint <1> self.credential = credential <2> self.container = container <3> self.verbose = verbose <4>
===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + """ + Class for using image embeddings from Azure AI Vision + To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api + """ + ===========changed ref 5=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.verbose = verbose + self.has_image_embeddings = has_image_embeddings ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def create_embeddings(self, blob_urls: List[str]) -> List[List[float]]: + headers = {"Ocp-Apim-Subscription-Key": self.credential} + params = {"api-version": "2023-02-01-preview", "modelVersion": "latest"} + endpoint = urljoin(self.endpoint, "computervision/retrieval:vectorizeImage") + embeddings: List[List[float]] = [] + async with aiohttp.ClientSession(headers=headers) as session: + for blob_url in blob_urls: + async for attempt in AsyncRetrying( + retry=retry_if_exception_type(Exception), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=self.before_retry_sleep, + ): + with attempt: + body = {"url": blob_url} + async with session.post(url=endpoint, params=params, json=body) as resp: + resp_json = await resp.json() + embeddings.append(resp_json["vector"]) + + return embeddings + ===========changed ref 7=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: + # Chunking is disabled when using GPT4V. To be updated in the future. 
+ if self.has_image_embeddings: + for i, page in enumerate(pages): + yield SplitPage(page_num=i, text=page.text) + def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) length = len(all_text) start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): </s> ===========changed ref 8=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: # offset: 1 <s> self.sentence_search_limit and all_text[start] not in self.sentence_endings ): if all_text[start] in self.word_breaks: last_word = start start -= 1 if all_text[start] not in self.sentence_endings and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield SplitPage(page_num=find_page(start), text=section_text) last_table_start = section_text.rfind("<table") if last_table_start > 2 * self.sentence_search_limit and last_table_start > section_text.rfind("</table"): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside sentence_search_limit, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside section_overlap, keep overlapping if self.verbose: print( f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}" ) start = min(end - self.section_overlap, start + last_table_start) else: start = end - self.section_overlap if start + self.section_overlap < end: yield SplitPage(page_num=find_page(start), text=all_text[start:end])
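Constructing the manager with the new flag; with store_page_images enabled, each page of an uploaded PDF is also rendered to a PNG blob so the Vision API can embed it. Endpoint and container values are placeholders:

from azure.identity.aio import DefaultAzureCredential
from scripts.prepdocslib.blobmanager import BlobManager

blob_manager = BlobManager(
    endpoint="https://<storage-account>.blob.core.windows.net",
    container="content",
    credential=DefaultAzureCredential(),
    store_page_images=True,  # new in the GPT4-vision integration
    verbose=True,
)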
scripts.prepdocslib.blobmanager/BlobManager.remove_blob
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<13>:<add> if ( <add> prefix is not None <add> and ( <add> not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) <add> ) <del> if (prefix is not None and not re.match(rf"{prefix}-\d+\.pdf", blob_path)) or ( <14>:<add> ) or (path is not None and blob_path == os.path.basename(path)): <del> path is not None and blob_path == os.path.basename(path) <15>:<del> ):
# module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> return <5> if path is None: <6> prefix = None <7> blobs = container_client.list_blob_names() <8> else: <9> prefix = os.path.splitext(os.path.basename(path))[0] <10> blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) <11> async for blob_path in blobs: <12> # This still supports PDFs split into individual pages, but we could remove in future to simplify code <13> if (prefix is not None and not re.match(rf"{prefix}-\d+\.pdf", blob_path)) or ( <14> path is not None and blob_path == os.path.basename(path) <15> ): <16> continue <17> if self.verbose: <18> print(f"\tRemoving blob {blob_path}") <19> await container_client.delete_blob(blob_path) <20>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], store_page_images: bool=False, verbose: bool=False) at: scripts.prepdocslib.blobmanager.BlobManager blob_name_from_file_name(filename) -> str at: scripts.prepdocslib.blobmanager.BlobManager.__init__ self.store_page_images = store_page_images at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: typing List = _alias(list, 1, inst=False, name='List') at: typing.IO __slots__ = () ===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.container = container + self.store_page_images = store_page_images self.verbose = verbose + self.user_delegation_key: Optional[UserDelegationKey] = None ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + """ + Class for using image embeddings from Azure AI Vision + To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api + """ + ===========changed ref 6=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.verbose = verbose + self.has_image_embeddings = has_image_embeddings ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def create_embeddings(self, blob_urls: List[str]) -> List[List[float]]: + headers = {"Ocp-Apim-Subscription-Key": self.credential} + params = {"api-version": "2023-02-01-preview", "modelVersion": "latest"} + endpoint = urljoin(self.endpoint, "computervision/retrieval:vectorizeImage") + embeddings: 
List[List[float]] = [] + async with aiohttp.ClientSession(headers=headers) as session: + for blob_url in blob_urls: + async for attempt in AsyncRetrying( + retry=retry_if_exception_type(Exception), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=self.before_retry_sleep, + ): + with attempt: + body = {"url": blob_url} + async with session.post(url=endpoint, params=params, json=body) as resp: + resp_json = await resp.json() + embeddings.append(resp_json["vector"]) + + return embeddings + ===========changed ref 8=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: def split_pages(self, pages: List[Page]) -> Generator[SplitPage, None, None]: + # Chunking is disabled when using GPT4V. To be updated in the future. + if self.has_image_embeddings: + for i, page in enumerate(pages): + yield SplitPage(page_num=i, text=page.text) + def find_page(offset): num_pages = len(pages) for i in range(num_pages - 1): if offset >= pages[i].offset and offset < pages[i + 1].offset: return pages[i].page_num return pages[num_pages - 1].page_num all_text = "".join(page.text for page in pages) length = len(all_text) start = 0 end = length while start + self.section_overlap < length: last_word = -1 end = start + self.max_section_length if end > length: end = length else: # Try to find the end of the sentence while ( end < length and (end - start - self.max_section_length) < self.sentence_search_limit and all_text[end] not in self.sentence_endings ): if all_text[end] in self.word_breaks: last_word = end end += 1 if end < length and all_text[end] not in self.sentence_endings and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while ( start > 0 and start > end - self.max_section_length - 2 * self.sentence_search_limit and all_text[start] not in self.sentence_endings ): </s>
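A small sketch of what the prefix filter above is meant to match: for a source file Benefit_Options.pdf, both the per-page PDFs and the new per-page PNGs share the Benefit_Options-<page> prefix. The combined pattern here is a simplification of the two checks in remove_blob:

import os, re

path = "data/Benefit_Options.pdf"
prefix = os.path.splitext(os.path.basename(path))[0]  # "Benefit_Options"

for blob_path in ["Benefit_Options-0.pdf", "Benefit_Options-0.png", "Benefit_Options.pdf"]:
    is_page_blob = re.match(rf"{prefix}-\d+\.(pdf|png)", blob_path) is not None
    print(blob_path, "page blob" if is_page_blob else "original upload")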
scripts.prepdocslib.searchmanager/SearchManager.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<4>:<add> self.search_images = search_images
# module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, + search_images: bool = False, ): <0> self.search_info = search_info <1> self.search_analyzer_name = search_analyzer_name <2> self.use_acls = use_acls <3> self.embeddings = embeddings <4>
===========unchanged ref 0=========== at: scripts.prepdocslib.embeddings OpenAIEmbeddings(open_ai_model_name: str, disable_batch: bool=False, verbose: bool=False) at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str, verbose: bool=False) ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 3=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 4=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.container = container + self.store_page_images = store_page_images self.verbose = verbose + self.user_delegation_key: Optional[UserDelegationKey] = None ===========changed ref 6=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + """ + Class for using image embeddings from Azure AI Vision + To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api + """ + ===========changed ref 7=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.verbose = verbose + self.has_image_embeddings = has_image_embeddings ===========changed ref 8=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def create_embeddings(self, blob_urls: List[str]) -> List[List[float]]: + headers = {"Ocp-Apim-Subscription-Key": self.credential} + params = {"api-version": "2023-02-01-preview", "modelVersion": "latest"} + endpoint = urljoin(self.endpoint, "computervision/retrieval:vectorizeImage") + embeddings: List[List[float]] = [] + async with aiohttp.ClientSession(headers=headers) as session: + for blob_url in blob_urls: + async for attempt in AsyncRetrying( + retry=retry_if_exception_type(Exception), + 
wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=self.before_retry_sleep, + ): + with attempt: + body = {"url": blob_url} + async with session.post(url=endpoint, params=params, json=body) as resp: + resp_json = await resp.json() + embeddings.append(resp_json["vector"]) + + return embeddings + ===========changed ref 9=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) async for blob_path in blobs: # This still supports PDFs split into individual pages, but we could remove in future to simplify code + if ( + prefix is not None + and ( + not re.match(rf"{prefix}-\d+\.pdf", blob_path) or not re.match(rf"{prefix}-\d+\.png", blob_path) + ) - if (prefix is not None and not re.match(rf"{prefix}-\d+\.pdf", blob_path)) or ( + ) or (path is not None and blob_path == os.path.basename(path)): - path is not None and blob_path == os.path.basename(path) - ): continue if self.verbose: print(f"\tRemoving blob {blob_path}") await container_client.delete_blob(blob_path)
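A minimal usage sketch for the ImageEmbeddings class added above. Assumptions: create_embeddings is a coroutine (its body awaits aiohttp calls), the import path follows the module header shown, and the endpoint, key, and blob URL are placeholders; each returned vector carries the 1024 dimensions that the imageEmbedding index field in the next record expects.

import asyncio

from prepdocslib.embeddings import ImageEmbeddings  # import path assumed from the module header above

async def main():
    client = ImageEmbeddings(
        credential="<azure-ai-vision-key>",  # placeholder key
        endpoint="https://<vision-resource>.cognitiveservices.azure.com/",  # placeholder endpoint
        verbose=True,
    )
    vectors = await client.create_embeddings(
        blob_urls=["https://<account>.blob.core.windows.net/content/report-7.png"]  # placeholder URL
    )
    print(len(vectors), len(vectors[0]))  # 1 vector of 1024 floats from Azure AI Vision

asyncio.run(main())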
scripts.prepdocslib.searchmanager/SearchManager.create_index
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<32>:<add> ) <add> if self.search_images: <add> fields.append( <add> SearchField( <add> name="imageEmbedding", <add> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <add> hidden=False, <add> searchable=True, <add> filterable=False, <add> sortable=False, <add> facetable=False, <add> vector_search_dimensions=1024, <add> vector_search_profile="embedding_config", <add> ),
# module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): <0> if self.search_info.verbose: <1> print(f"Ensuring search index {self.search_info.index_name} exists") <2> <3> async with self.search_info.create_search_index_client() as search_index_client: <4> fields = [ <5> SimpleField(name="id", type="Edm.String", key=True), <6> SearchableField(name="content", type="Edm.String", analyzer_name=self.search_analyzer_name), <7> SearchField( <8> name="embedding", <9> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <10> hidden=False, <11> searchable=True, <12> filterable=False, <13> sortable=False, <14> facetable=False, <15> vector_search_dimensions=1536, <16> vector_search_profile="embedding_config", <17> ), <18> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <19> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), <20> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), <21> ] <22> if self.use_acls: <23> fields.append( <24> SimpleField( <25> name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True <26> ) <27> ) <28> fields.append( <29> SimpleField( <30> name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True <31> ) <32> ) <33> <34> index = SearchIndex( <35> name=self.search_info.index_name, <36> fields=fields, <37> semantic_settings=SemanticSettings( <38> configurations=[ <39> SemanticConfiguration( <40> name="default", <41> prioritized_fields=PrioritizedFields( <42> title</s>
===========below chunk 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): # offset: 1 ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswVectorSearchAlgorithmConfiguration( name="hnsw_config", kind=VectorSearchAlgorithmKind.HNSW, parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm="hnsw_config", ), ], ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: if self.search_info.verbose: print(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: if self.search_info.verbose: print(f"Search index {self.search_info.index_name} already exists") ===========unchanged ref 0=========== at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls at: scripts.prepdocslib.strategy.SearchInfo create_search_index_client() -> SearchIndexClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name self.verbose = verbose ===========changed ref 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, + search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.embeddings = embeddings + self.search_images = search_images ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 2=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 3=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 4=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 5=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 6=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.container = container + self.store_page_images = store_page_images self.verbose = verbose + self.user_delegation_key: Optional[UserDelegationKey] = None ===========changed ref 7=========== # module: scripts.prepdocslib.embeddings + class 
ImageEmbeddings: + """ + Class for using image embeddings from Azure AI Vision + To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api + """ + ===========changed ref 8=========== # module: scripts.prepdocslib.textsplitter class TextSplitter: + def __init__(self, has_image_embeddings, verbose: bool = False): - def __init__(self, verbose: bool = False): self.sentence_endings = [".", "!", "?"] self.word_breaks = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] self.max_section_length = 1000 self.sentence_search_limit = 100 self.section_overlap = 100 self.verbose = verbose + self.has_image_embeddings = has_image_embeddings ===========changed ref 9=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def create_embeddings(self, blob_urls: List[str]) -> List[List[float]]: + headers = {"Ocp-Apim-Subscription-Key": self.credential} + params = {"api-version": "2023-02-01-preview", "modelVersion": "latest"} + endpoint = urljoin(self.endpoint, "computervision/retrieval:vectorizeImage") + embeddings: List[List[float]] = [] + async with aiohttp.ClientSession(headers=headers) as session: + for blob_url in blob_urls: + async for attempt in AsyncRetrying( + retry=retry_if_exception_type(Exception), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=self.before_retry_sleep, + ): + with attempt: + body = {"url": blob_url} + async with session.post(url=endpoint, params=params, json=body) as resp: + resp_json = await resp.json() + embeddings.append(resp_json["vector"]) + + return embeddings +
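For quick reference, the vector wiring this change introduces, pulled out as a standalone sketch. The field, profile, and algorithm names come straight from the diff; the import module is where these models live in the azure-search-documents preview package this commit targets, and nothing else is assumed.

from azure.search.documents.indexes.models import (
    HnswParameters,
    HnswVectorSearchAlgorithmConfiguration,
    SearchField,
    SearchFieldDataType,
    VectorSearch,
    VectorSearchAlgorithmKind,
    VectorSearchProfile,
)

# Text embeddings (1536 dims) and Azure AI Vision image embeddings (1024 dims)
# share a single HNSW profile in this index.
image_field = SearchField(
    name="imageEmbedding",
    type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
    hidden=False,
    searchable=True,
    filterable=False,
    sortable=False,
    facetable=False,
    vector_search_dimensions=1024,
    vector_search_profile="embedding_config",
)

vector_search = VectorSearch(
    algorithms=[
        HnswVectorSearchAlgorithmConfiguration(
            name="hnsw_config",
            kind=VectorSearchAlgorithmKind.HNSW,
            parameters=HnswParameters(metric="cosine"),
        )
    ],
    profiles=[VectorSearchProfile(name="embedding_config", algorithm="hnsw_config")],
)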
scripts.prepdocslib.searchmanager/SearchManager.update_content
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<10>:<add> "sourcepage": BlobManager.blob_image_name_from_file_page( <add> filename=section.content.filename(), page=section.split_page.page_num <add> ) <add> if image_embeddings <add> else BlobManager.sourcepage_from_file_page( <del> "sourcepage": BlobManager.sourcepage_from_file_page( <24>:<add> if image_embeddings: <add> for i, (document, section) in enumerate(zip(documents, batch)): <add> document["imageEmbedding"] = image_embeddings[section.split_page.page_num]
# module: scripts.prepdocslib.searchmanager class SearchManager: + def update_content(self, sections: List[Section], image_embeddings: Optional[List[List[float]]] = None): - def update_content(self, sections: List[Section]): <0> MAX_BATCH_SIZE = 1000 <1> section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)] <2> <3> async with self.search_info.create_search_client() as search_client: <4> for batch_index, batch in enumerate(section_batches): <5> documents = [ <6> { <7> "id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}", <8> "content": section.split_page.text, <9> "category": section.category, <10> "sourcepage": BlobManager.sourcepage_from_file_page( <11> filename=section.content.filename(), page=section.split_page.page_num <12> ), <13> "sourcefile": section.content.filename(), <14> **section.content.acls, <15> } <16> for section_index, section in enumerate(batch) <17> ] <18> if self.embeddings: <19> embeddings = await self.embeddings.create_embeddings( <20> texts=[section.split_page.text for section in batch] <21> ) <22> for i, document in enumerate(documents): <23> document["embedding"] = embeddings[i] <24> <25> await search_client.upload_documents(documents) <26>
===========unchanged ref 0=========== at: scripts.prepdocslib.listfilestrategy.File filename_to_id() at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) documents = [ { "id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}", "content": section.split_page.text, "category": section.category, "sourcepage": BlobManager.blob_image_name_from_file_page( filename=section.content.filename(), page=section.split_page.page_num ) if image_embeddings else BlobManager.sourcepage_from_file_page( filename=section.content.filename(), page=section.split_page.page_num ), "sourcefile": section.content.filename(), **section.content.acls, } for section_index, section in enumerate(batch) ] documents = [ { "id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}", "content": section.split_page.text, "category": section.category, "sourcepage": BlobManager.blob_image_name_from_file_page( filename=section.content.filename(), page=section.split_page.page_num ) if image_embeddings else BlobManager.sourcepage_from_file_page( filename=section.content.filename(), page=section.split_page.page_num ), "sourcefile": section.content.filename(), **section.content.acls, } for section_index, section in enumerate(batch) ] at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info ===========unchanged ref 1=========== at: scripts.prepdocslib.searchmanager.SearchManager.create_index index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswVectorSearchAlgorithmConfiguration( name="hnsw_config", kind=VectorSearchAlgorithmKind.HNSW, parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm="hnsw_config", ), ], ), ) at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page self.content = content self.category = category at: scripts.prepdocslib.strategy.SearchInfo create_search_client() -> SearchClient at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.index_name = index_name self.verbose = verbose at: scripts.prepdocslib.textsplitter.SplitPage.__init__ self.text = text at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, + search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.embeddings = embeddings + self.search_images = search_images ===========changed ref 1=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): if self.search_info.verbose: print(f"Ensuring search index {self.search_info.index_name} exists") async with self.search_info.create_search_index_client() as search_index_client: fields = [ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name=self.search_analyzer_name), SearchField( name="embedding", 
type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_profile="embedding_config", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), ] if self.use_acls: fields.append( SimpleField( name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True ) ) fields.append( SimpleField( name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True ) + ) + if self.search_images: + fields.append( + SearchField( + name="imageEmbedding", + type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, + searchable=True, + filterable=False, + sortable=False, + facetable=False, + vector_search_dimensions=1024, + vector_search_profile="embedding_config</s> ===========changed ref 2=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def create_index(self): # offset: 1 <s> facetable=False, + vector_search_dimensions=1024, + vector_search_profile="embedding_config", + ), ) index = SearchIndex( name=self.search_info.index_name, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithms=[ HnswVectorSearchAlgorithmConfiguration( name="hnsw_config", kind=VectorSearchAlgorithmKind.HNSW, parameters=HnswParameters(metric="cosine"), ) ], profiles=[ VectorSearchProfile( name="embedding_config", algorithm="hnsw_config", ), ], ), ) if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]: if self.search_info.verbose: print(f"Creating {self.search_info.index_name} search index") await search_index_client.create_index(index) else: if self.search_info.verbose: print(f"Search index {self.search_info.index_name} already exists")
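When image embeddings are enabled, update_content now points each section's sourcepage at the rendered page image instead of the PDF page. A small sketch of just that naming rule, reusing the blob_image_name_from_file_page logic shown earlier (the sample filename is illustrative):

import os

def blob_image_name_from_file_page(filename: str, page: int = 0) -> str:
    # Same logic as BlobManager.blob_image_name_from_file_page above.
    return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"

print(blob_image_name_from_file_page("data/Financial Market Analysis Report 2023.pdf", 7))
# -> Financial Market Analysis Report 2023-7.png

This is the same name MockBlob uses in the test mocks, so citations resolve to the stored page image.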
tests.test_blob_manager/test_upload_and_remove
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<2>:<add> filename = os.path.basename(f.content.name) <del> filename = f.content.name.split("/tmp/")[1]
# module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): <0> with NamedTemporaryFile(suffix=".pdf") as temp_file: <1> f = File(temp_file.file) <2> filename = f.content.name.split("/tmp/")[1] <3> <4> # Set up mocks used by upload_blob <5> async def mock_exists(*args, **kwargs): <6> return True <7> <8> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) <9> <10> async def mock_upload_blob(self, name, *args, **kwargs): <11> assert name == filename <12> return True <13> <14> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) <15> <16> await blob_manager.upload_blob(f) <17> <18> # Set up mocks used by remove_blob <19> def mock_list_blob_names(*args, **kwargs): <20> assert kwargs.get("name_starts_with") == filename.split(".pdf")[0] <21> <22> class AsyncBlobItemsIterator: <23> def __init__(self, file): <24> self.files = [file, "dontdelete.pdf"] <25> <26> def __aiter__(self): <27> return self <28> <29> async def __anext__(self): <30> if self.files: <31> return self.files.pop() <32> raise StopAsyncIteration <33> <34> return AsyncBlobItemsIterator(filename) <35> <36> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) <37> <38> async def mock_delete_blob(self, name, *args, **kwargs): <39> assert name == filename <40> return True <41> <42> </s>
===========below chunk 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): # offset: 1 await blob_manager.remove_blob(f.content.name) ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: sys version_info: _version_info at: sys._version_info major: int minor: int micro: int releaselevel: str serial: int ===========unchanged ref 1=========== at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
-> IO[bytes] at: typing.IO __slots__ = () at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: tests.mocks + + ===========changed ref 1=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 2=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 3=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 4=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 5=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 6=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 7=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 8=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 9=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 10=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 11=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 12=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 13=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 14=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 15=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 18=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 19=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 20=========== # module: scripts.prepdocslib.blobmanager class 
BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 21=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. + ===========changed ref 22=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) +
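The fix in this test replaces the hard-coded split on "/tmp/" with os.path.basename, which works regardless of where NamedTemporaryFile places the file (for example on Windows or with TMPDIR set). A quick illustration:

import os
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(suffix=".pdf") as temp_file:
    # Old approach breaks whenever the temp dir is not /tmp:
    #   temp_file.name.split("/tmp/")[1]  -> IndexError on Windows or a custom TMPDIR
    filename = os.path.basename(temp_file.name)  # portable
    print(filename)  # e.g. tmpab12cd34.pdf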
tests.test_blob_manager/test_upload_and_remove_all
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<3>:<add> filename = os.path.basename(f.content.name) <del> filename = f.content.name.split("/tmp/")[1]
# module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): <0> with NamedTemporaryFile(suffix=".pdf") as temp_file: <1> f = File(temp_file.file) <2> print(f.content.name) <3> filename = f.content.name.split("/tmp/")[1] <4> <5> # Set up mocks used by upload_blob <6> async def mock_exists(*args, **kwargs): <7> return True <8> <9> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) <10> <11> async def mock_upload_blob(self, name, *args, **kwargs): <12> assert name == filename <13> return True <14> <15> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) <16> <17> await blob_manager.upload_blob(f) <18> <19> # Set up mocks used by remove_blob <20> def mock_list_blob_names(*args, **kwargs): <21> assert kwargs.get("name_starts_with") is None <22> <23> class AsyncBlobItemsIterator: <24> def __init__(self, file): <25> self.files = [file] <26> <27> def __aiter__(self): <28> return self <29> <30> async def __anext__(self): <31> if self.files: <32> return self.files.pop() <33> raise StopAsyncIteration <34> <35> return AsyncBlobItemsIterator(filename) <36> <37> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) <38> <39> async def mock_delete_blob(self, name, *args, **kwargs): <40> assert name == filename <41> return True <42> <43> monkey</s>
===========below chunk 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): # offset: 1 await blob_manager.remove_blob() ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skipif: _SkipifMarkDecorator at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: sys version_info: _version_info at: sys._version_info minor: int ===========unchanged ref 1=========== at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) 
-> IO[bytes] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): with NamedTemporaryFile(suffix=".pdf") as temp_file: f = File(temp_file.file) + filename = os.path.basename(f.content.name) - filename = f.content.name.split("/tmp/")[1] # Set up mocks used by upload_blob async def mock_exists(*args, **kwargs): return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) async def mock_upload_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) await blob_manager.upload_blob(f) # Set up mocks used by remove_blob def mock_list_blob_names(*args, **kwargs): assert kwargs.get("name_starts_with") == filename.split(".pdf")[0] class AsyncBlobItemsIterator: def __init__(self, file): self.files = [file, "dontdelete.pdf"] def __aiter__(self): return self async def __anext__(self): if self.files: return self.files.pop() raise StopAsyncIteration return AsyncBlobItemsIterator(filename) monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) async def mock_delete_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock</s> ===========changed ref 1=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): # offset: 1 <s> return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) await blob_manager.remove_blob(f.content.name) ===========changed ref 2=========== + # module: tests.mocks + + ===========changed ref 3=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 4=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 5=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 6=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 7=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 8=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 9=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 10=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 11=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 13=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, 
secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 14=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 15=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 16=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) -
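The AsyncBlobItemsIterator helper in these tests is the standard way to fake the async paging objects returned by list_blob_names. A minimal generic version of the same pattern for reuse (the class and variable names here are illustrative, not part of the repo):

class FakeAsyncIterator:
    """Yields canned items the way an Azure SDK async pager does."""

    def __init__(self, items):
        self.items = list(items)

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.items:
            return self.items.pop(0)
        raise StopAsyncIteration

# e.g. monkeypatch list_blob_names to return FakeAsyncIterator(["a.pdf", "b.pdf"])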
tests.test_blob_manager/test_create_container_upon_upload
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<2>:<add> filename = os.path.basename(f.content.name) <del> filename = f.content.name.split("/tmp/")[1]
# module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_create_container_upon_upload(monkeypatch, mock_env, blob_manager): <0> with NamedTemporaryFile(suffix=".pdf") as temp_file: <1> f = File(temp_file.file) <2> filename = f.content.name.split("/tmp/")[1] <3> <4> # Set up mocks used by upload_blob <5> async def mock_exists(*args, **kwargs): <6> return False <7> <8> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) <9> <10> async def mock_create_container(*args, **kwargs): <11> return <12> <13> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.create_container", mock_create_container) <14> <15> async def mock_upload_blob(self, name, *args, **kwargs): <16> assert name == filename <17> return True <18> <19> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) <20> <21> await blob_manager.upload_blob(f) <22>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skipif: _SkipifMarkDecorator at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: sys version_info: _version_info at: sys._version_info minor: int ===========unchanged ref 1=========== at: tempfile NamedTemporaryFile(mode: str=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[Any] NamedTemporaryFile(mode: Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"], buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[str] NamedTemporaryFile(mode: Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"]=..., buffering: int=..., encoding: Optional[str]=..., newline: Optional[str]=..., suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=..., delete: bool=..., *, errors: Optional[str]=...) -> IO[bytes] ===========changed ref 0=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): with NamedTemporaryFile(suffix=".pdf") as temp_file: f = File(temp_file.file) print(f.content.name) + filename = os.path.basename(f.content.name) - filename = f.content.name.split("/tmp/")[1] # Set up mocks used by upload_blob async def mock_exists(*args, **kwargs): return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) async def mock_upload_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) await blob_manager.upload_blob(f) # Set up mocks used by remove_blob def mock_list_blob_names(*args, **kwargs): assert kwargs.get("name_starts_with") is None class AsyncBlobItemsIterator: def __init__(self, file): self.files = [file] def __aiter__(self): return self async def __anext__(self): if self.files: return self.files.pop() raise StopAsyncIteration return AsyncBlobItemsIterator(filename) monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) async def mock_delete_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob</s> ===========changed ref 1=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove_all(monkeypatch, mock_env, blob_manager): # offset: 1 <s> monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) await blob_manager.remove_blob() ===========changed ref 2=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 
3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): with NamedTemporaryFile(suffix=".pdf") as temp_file: f = File(temp_file.file) + filename = os.path.basename(f.content.name) - filename = f.content.name.split("/tmp/")[1] # Set up mocks used by upload_blob async def mock_exists(*args, **kwargs): return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) async def mock_upload_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) await blob_manager.upload_blob(f) # Set up mocks used by remove_blob def mock_list_blob_names(*args, **kwargs): assert kwargs.get("name_starts_with") == filename.split(".pdf")[0] class AsyncBlobItemsIterator: def __init__(self, file): self.files = [file, "dontdelete.pdf"] def __aiter__(self): return self async def __anext__(self): if self.files: return self.files.pop() raise StopAsyncIteration return AsyncBlobItemsIterator(filename) monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.list_blob_names", mock_list_blob_names) async def mock_delete_blob(self, name, *args, **kwargs): assert name == filename return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock</s> ===========changed ref 3=========== # module: tests.test_blob_manager @pytest.mark.asyncio @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") async def test_upload_and_remove(monkeypatch, mock_env, blob_manager): # offset: 1 <s> return True monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) await blob_manager.remove_blob(f.content.name) ===========changed ref 4=========== + # module: tests.mocks + + ===========changed ref 5=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 6=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 7=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 8=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self +
app.backend.core.modelhelper/get_token_limit
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<1>:<add> raise ValueError(f"Expected model gpt-35-turbo and above. Received: {model_id}") <del> raise ValueError("Expected model gpt-35-turbo and above")
# module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: <0> if model_id not in MODELS_2_TOKEN_LIMITS: <1> raise ValueError("Expected model gpt-35-turbo and above") <2> return MODELS_2_TOKEN_LIMITS[model_id] <3>
===========unchanged ref 0=========== at: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000, "gpt-4v": 128000, } ===========changed ref 0=========== # module: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000, + "gpt-4v": 128000, } - AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k"} + AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k", "gpt-4v": "gpt-4-turbo-vision"} + ===========changed ref 1=========== + # module: tests.mocks + + ===========changed ref 2=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 3=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 4=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 5=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 6=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 7=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 8=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 9=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 10=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 11=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 12=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 13=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 14=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 15=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 16=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed 
ref 19=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 20=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 21=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 22=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. + ===========changed ref 23=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) + ===========changed ref 24=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 25=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, + search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.embeddings = embeddings + self.search_images = search_images ===========changed ref 26=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.container = container + self.store_page_images = store_page_images self.verbose = verbose + self.user_delegation_key: Optional[UserDelegationKey] = None ===========changed ref 27=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + """ + Class for using image embeddings from Azure AI Vision + To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api + """ +
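With the MODELS_2_TOKEN_LIMITS table above, the improved error message now echoes the offending model id, which makes misconfigured deployments much easier to spot:

get_token_limit("gpt-35-turbo")   # 4000
get_token_limit("gpt-4v")         # 128000
get_token_limit("gpt-2")          # ValueError: Expected model gpt-35-turbo and above. Received: gpt-2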
app.backend.core.modelhelper/num_tokens_from_messages
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<13>:<add> <15>:<add> for key, value in message.items(): <del> for value in message.values(): <16>:<add> if isinstance(value, list): <add> for v in value: <add> # TODO: Update token count for images https://github.com/openai/openai-cookbook/pull/881/files <add> if isinstance(v, str): <add> num_tokens += len(encoding.encode(v)) <add> else: <add> num_tokens += len(encoding.encode(value)) <del> num_tokens += len(encoding.encode(str(value)))
# module: app.backend.core.modelhelper def num_tokens_from_messages(message: dict[str, str], model: str) -> int: <0> """ <1> Calculate the number of tokens required to encode a message. <2> Args: <3> message (dict): The message to encode, represented as a dictionary. <4> model (str): The name of the model to use for encoding. <5> Returns: <6> int: The total number of tokens required to encode the message. <7> Example: <8> message = {'role': 'user', 'content': 'Hello, how are you?'} <9> model = 'gpt-3.5-turbo' <10> num_tokens_from_messages(message, model) <11> output: 11 <12> """ <13> encoding = tiktoken.encoding_for_model(get_oai_chatmodel_tiktok(model)) <14> num_tokens = 2 # For "role" and "content" keys <15> for value in message.values(): <16> num_tokens += len(encoding.encode(str(value))) <17> return num_tokens <18>
===========unchanged ref 0=========== at: app.backend.core.modelhelper get_oai_chatmodel_tiktok(aoaimodel: str) -> str at: tiktoken.model encoding_for_model(model_name: str) -> Encoding ===========changed ref 0=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: + raise ValueError(f"Expected model gpt-35-turbo and above. Received: {model_id}") - raise ValueError("Expected model gpt-35-turbo and above") return MODELS_2_TOKEN_LIMITS[model_id] ===========changed ref 1=========== # module: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000, + "gpt-4v": 128000, } - AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k"} + AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k", "gpt-4v": "gpt-4-turbo-vision"} + ===========changed ref 2=========== + # module: tests.mocks + + ===========changed ref 3=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 4=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 5=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 6=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 7=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 8=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 9=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 10=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 11=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 13=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 14=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 15=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 16=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 17=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 18=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, 
organization=self.organization) - ===========changed ref 19=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 20=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 21=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 22=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 23=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. + ===========changed ref 24=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) + ===========changed ref 25=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 26=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def __init__( self, search_info: SearchInfo, search_analyzer_name: Optional[str] = None, use_acls: bool = False, embeddings: Optional[OpenAIEmbeddings] = None, + search_images: bool = False, ): self.search_info = search_info self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.embeddings = embeddings + self.search_images = search_images ===========changed ref 27=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def __init__( self, endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], + store_page_images: bool = False, verbose: bool = False, ): self.endpoint = endpoint self.credential = credential self.container = container + self.store_page_images = store_page_images self.verbose = verbose + self.user_delegation_key: Optional[UserDelegationKey] = None
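The new branch handles GPT-4 Vision style messages, where content is a list of parts rather than a plain string; string parts are encoded and image parts are skipped for now, per the TODO in the diff. A usage sketch mirroring that logic with tiktoken (the exact count depends on the encoding, and the image URL is a placeholder):

import tiktoken

encoding = tiktoken.encoding_for_model("gpt-4")

message = {
    "role": "user",
    "content": [
        "Describe this chart",                                     # counted
        {"image_url": {"url": "https://example.com/page-7.png"}},  # skipped for now (see TODO)
    ],
}

num_tokens = 2  # for "role" and "content" keys
for value in message.values():
    if isinstance(value, list):
        for v in value:
            if isinstance(v, str):
                num_tokens += len(encoding.encode(v))
    else:
        num_tokens += len(encoding.encode(value))
print(num_tokens)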
tests.e2e/test_chat
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
# module: tests.e2e def test_chat(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint with streaming results <1> def handle(route: Route): <2> # Assert that session_state is specified in the request (None for now) <3> session_state = route.request.post_data_json["session_state"] <4> assert session_state is None <5> # Read the JSONL from our snapshot results and return as the response <6> f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") <7> jsonl = f.read() <8> f.close() <9> route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) <10> <11> page.route("*/**/chat", handle) <12> <13> # Check initial page state <14> page.goto(live_server_url) <15> expect(page).to_have_title("GPT + Enterprise data | Sample") <16> expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() <17> expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() <18> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <19> <20> # Ask a question and wait for the message to appear <21> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <22> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <23> "Whats the dental plan?" <24> ) <25> page.get_by_role("button", name="Ask question button").click() <26> <27> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <28> expect(page.get_by_text("The capital of France is</s>
===========below chunk 0=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # offset: 1 expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() # Show the citation document page.get_by_text("1. Benefit_Options-2.pdf").click() expect(page.get_by_role("tab", name="Citation")).to_be_visible() expect(page.get_by_title("Citation")).to_be_visible() # Show the thought process page.get_by_label("Show thought process").click() expect(page.get_by_title("Thought process")).to_be_visible() expect(page.get_by_text("Searched for:")).to_be_visible() # Show the supporting content page.get_by_label("Show supporting content").click() expect(page.get_by_title("Supporting content")).to_be_visible() expect(page.get_by_role("heading", name="Benefit_Options-2.pdf")).to_be_visible() # Clear the chat page.get_by_role("button", name="Clear chat").click() expect(page.get_by_text("Whats the dental plan?")).not_to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).not_to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() ===========unchanged ref 0=========== at: io.BufferedRandom close(self) -> None at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 1=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 2=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 3=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 4=========== + # module: tests + + ===========changed ref 5=========== + # module: app.backend.core.imageshelper + + ===========changed ref 6=========== + # module: tests.mocks + + ===========changed ref 7=========== # module: tests.test_content_file - - ===========changed ref 8=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 9=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 10=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 11=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 13=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 14=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 17=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 18=========== + # 
module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 19=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 20=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 21=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 22=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 23=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 24=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 25=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 26=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 27=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 28=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 29=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 30=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 32=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 33=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 34=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 35=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" +
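A minimal, self-contained sketch (the endpoint pattern and payload are hypothetical stand-ins, not the repo's snapshot data) of the Playwright route-mocking pattern the test above relies on: intercept the request, assert on its JSON body, then fulfill it with canned data instead of hitting a live backend.

from playwright.sync_api import Page, Route

def mock_chat_endpoint(page: Page) -> None:
    def handle(route: Route):
        # Inspect the intercepted request body before responding
        body = route.request.post_data_json
        assert "messages" in body
        # Respond with a canned newline-delimited JSON chunk, mimicking a stream
        route.fulfill(
            body='{"choices": [{"delta": {"content": "Hi"}}]}\n',
            status=200,
            headers={"Transfer-encoding": "Chunked"},
        )

    page.route("*/**/chat", handle)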
app.backend.core.messagebuilder/MessageBuilder.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<1>:<add> ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", system_content)) <del> ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content))
# module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): <0> self.messages: list[ChatCompletionMessageParam] = [ <1> ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) <2> ] <3> self.model = chatgpt_model <4>
===========unchanged ref 0=========== at: unicodedata normalize(form: Text, unistr: Text, /) -> Text ===========changed ref 0=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 1=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 2=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 3=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 4=========== + # module: tests + + ===========changed ref 5=========== + # module: app.backend.core.imageshelper + + ===========changed ref 6=========== + # module: tests.mocks + + ===========changed ref 7=========== # module: tests.test_content_file - - ===========changed ref 8=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 9=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 10=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 11=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 13=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 14=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 17=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 18=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 19=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 20=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 21=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 22=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 23=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 24=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 25=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 
26=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 27=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 28=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 29=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 30=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 32=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 33=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 34=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 35=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 36=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. 
+ ===========changed ref 37=========== + # module: tests.test_chatvisionapproach + def test_build_filter(chat_approach): + result = chat_approach.build_filter({"exclude_category": "test_category"}, {}) + assert result == "category ne 'test_category'" + ===========changed ref 38=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) + ===========changed ref 39=========== + # module: app.backend.core.imageshelper + class ImageURL(TypedDict, total=False): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image.""" + ===========changed ref 40=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_embedding_arguments(self) -> dict[str, Any]: + return { + "model": self.open_ai_model_name, + "api_key": self.credential, + "api_type": "openai", + "organization": self.organization, + } + ===========changed ref 41=========== + # module: app.backend.approaches.retrievethenreadvision + # Replace these with your own values, either in environment variables or directly here + AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") + AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") +
app.backend.core.messagebuilder/MessageBuilder.insert_message
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<5>:<add> content (str | List[ChatCompletionContentPartParam]): The content of the message. <del> content (str): The content of the message. <11>:<add> elif role == "system" and isinstance(content, str): <del> elif role == "system": <12>:<add> message = ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", content)) <del> message = ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(content)) <13>:<add> elif role == "assistant" and isinstance(content, str): <add> message = ChatCompletionAssistantMessageParam( <add> role="assistant", content=unicodedata.normalize("NFC", content) <add> ) <del> elif role == "assistant": <14>:<del> message = ChatCompletionAssistantMessageParam(role="assistant", content=self.normalize_content(content))
# module: app.backend.core.messagebuilder class MessageBuilder: + def insert_message(self, role: str, content: Union[str, List[ChatCompletionContentPartParam]], index: int = 1): - def insert_message(self, role: str, content: str, index: int = 1): <0> """ <1> Inserts a message into the conversation at the specified index, <2> or at index 1 (after system message) if no index is specified. <3> Args: <4> role (str): The role of the message sender (either "user", "system", or "assistant"). <5> content (str): The content of the message. <6> index (int): The index at which to insert the message. <7> """ <8> message: ChatCompletionMessageParam <9> if role == "user": <10> message = ChatCompletionUserMessageParam(role="user", content=self.normalize_content(content)) <11> elif role == "system": <12> message = ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(content)) <13> elif role == "assistant": <14> message = ChatCompletionAssistantMessageParam(role="assistant", content=self.normalize_content(content)) <15> else: <16> raise ValueError(f"Invalid role: {role}") <17> self.messages.insert(index, message) <18>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder normalize_content(content: Union[str, List[ChatCompletionContentPartParam]]) at: typing List = _alias(list, 1, inst=False, name='List') at: unicodedata normalize(form: Text, unistr: Text, /) -> Text ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", system_content)) - ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) ] self.model = chatgpt_model ===========changed ref 1=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 2=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 3=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 4=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 5=========== + # module: tests + + ===========changed ref 6=========== + # module: app.backend.core.imageshelper + + ===========changed ref 7=========== + # module: tests.mocks + + ===========changed ref 8=========== # module: tests.test_content_file - - ===========changed ref 9=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 10=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 11=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 13=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 14=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 17=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 18=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 19=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 20=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 21=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 22=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 23=========== + # module: 
tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 24=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 25=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 26=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 27=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 28=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 29=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 30=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 32=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 33=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 34=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 35=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 36=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 37=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. 
+ ===========changed ref 38=========== + # module: tests.test_chatvisionapproach + def test_build_filter(chat_approach): + result = chat_approach.build_filter({"exclude_category": "test_category"}, {}) + assert result == "category ne 'test_category'" + ===========changed ref 39=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) + ===========changed ref 40=========== + # module: app.backend.core.imageshelper + class ImageURL(TypedDict, total=False): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image.""" +
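Note that insert_message defaults to index 1, just after the system message, so later calls land earlier in the final list. A small sketch of that ordering effect, using plain dicts in place of the typed ChatCompletion params:

messages = [{"role": "system", "content": "sys"}]

def insert_message(role, content, index=1):
    messages.insert(index, {"role": role, "content": content})

insert_message("user", "real question")        # inserted first...
insert_message("assistant", "few-shot answer")
insert_message("user", "few-shot question")
# ...but ends up last: system, few-shot question, few-shot answer, real question
assert [m["content"] for m in messages] == [
    "sys", "few-shot question", "few-shot answer", "real question"
]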
app.backend.core.messagebuilder/MessageBuilder.normalize_content
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<0>:<add> if isinstance(content, str): <add> return unicodedata.normalize("NFC", content) <del> return unicodedata.normalize("NFC", content) <1>:<add> elif isinstance(content, list): <add> for part in content: <add> if "image_url" not in part: <add> part["text"] = unicodedata.normalize("NFC", part["text"]) <add> return content
# module: app.backend.core.messagebuilder class MessageBuilder: + def normalize_content(self, content: Union[str, List[ChatCompletionContentPartParam]]): - def normalize_content(self, content: str): <0> return unicodedata.normalize("NFC", content) <1>
===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages: list[ChatCompletionMessageParam] = [ + ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", system_content)) - ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(system_content)) ] self.model = chatgpt_model ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def insert_message(self, role: str, content: Union[str, List[ChatCompletionContentPartParam]], index: int = 1): - def insert_message(self, role: str, content: str, index: int = 1): """ Inserts a message into the conversation at the specified index, or at index 1 (after system message) if no index is specified. Args: role (str): The role of the message sender (either "user", "system", or "assistant"). + content (str | List[ChatCompletionContentPartParam]): The content of the message. - content (str): The content of the message. index (int): The index at which to insert the message. """ message: ChatCompletionMessageParam if role == "user": message = ChatCompletionUserMessageParam(role="user", content=self.normalize_content(content)) + elif role == "system" and isinstance(content, str): - elif role == "system": + message = ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", content)) - message = ChatCompletionSystemMessageParam(role="system", content=self.normalize_content(content)) + elif role == "assistant" and isinstance(content, str): + message = ChatCompletionAssistantMessageParam( + role="assistant", content=unicodedata.normalize("NFC", content) + ) - elif role == "assistant": - message = ChatCompletionAssistantMessageParam(role="assistant", content=self.normalize_content(content)) else: raise ValueError(f"Invalid role: {role}") self.messages.insert(index, message) ===========changed ref 2=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 3=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 4=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 5=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 6=========== + # module: tests + + ===========changed ref 7=========== + # module: app.backend.core.imageshelper + + ===========changed ref 8=========== + # module: tests.mocks + + ===========changed ref 9=========== # module: tests.test_content_file - - ===========changed ref 10=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 11=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 12=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 13=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 14=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + 
class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 17=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 18=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 19=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 20=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 21=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 22=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 23=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 24=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 25=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 26=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 27=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 28=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 29=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 30=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 32=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 33=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 34=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 35=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 
36=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} +
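A walk-through of the new list branch in normalize_content: text parts are NFC-normalized in place while image_url parts pass through untouched. The dict shapes below mirror the OpenAI content-part params but are written as plain dicts:

import unicodedata

content = [
    {"type": "text", "text": "cafe\u0301"},  # decomposed accent in a text part
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
]
for part in content:
    if "image_url" not in part:
        part["text"] = unicodedata.normalize("NFC", part["text"])

assert content[0]["text"] == "caf\u00e9"  # text normalized; image part untouched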
tests.test_app/test_chat_with_history
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<18>:<add> assert thoughts_contains_text(result["choices"][0]["context"]["thoughts"], "performance review") <del> assert result["choices"][0]["context"]["thoughts"].find("performance review") != -1
# module: tests.test_app @pytest.mark.asyncio async def test_chat_with_history(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "messages": [ <4> {"content": "What happens in a performance review?", "role": "user"}, <5> { <6> "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <7> "role": "assistant", <8> }, <9> {"content": "Is dental covered?", "role": "user"}, <10> ], <11> "context": { <12> "overrides": {"retrieval_mode": "text"}, <13> }, <14> }, <15> ) <16> assert response.status_code == 200 <17> result = await response.get_json() <18> assert result["choices"][0]["context"]["thoughts"].find("performance review") != -1 <19> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <20>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: tests.test_app.test_chat_stream_text_filter response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ "stream": True, "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", } }, }, ) ===========changed ref 0=========== # module: tests.test_app + def thoughts_contains_text(thoughts, text): + found = False + for thought in thoughts: + description = thought["description"] + if isinstance(description, str) and text in description: + found = True + break + elif isinstance(description, list) and any(text in item for item in description): + found = True + break + return found + ===========changed ref 1=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 2=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 3=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 4=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 5=========== + # module: tests + + ===========changed ref 6=========== + # module: app.backend.core.imageshelper + + ===========changed ref 7=========== + # module: tests.mocks + + ===========changed ref 8=========== # module: tests.test_content_file - - ===========changed ref 9=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 10=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 11=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 13=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 14=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 17=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 18=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 19=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 20=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 21=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 
22=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 23=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 24=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 25=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 26=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 27=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 28=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 29=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 30=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 32=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 33=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 34=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 35=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} + ===========changed ref 36=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" + ===========changed ref 37=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return self.data.pop(0) # This should be a list of dictionaries. 
+ ===========changed ref 38=========== + # module: tests.test_chatvisionapproach + def test_build_filter(chat_approach): + result = chat_approach.build_filter({"exclude_category": "test_category"}, {}) + assert result == "category ne 'test_category'" + ===========changed ref 39=========== + # module: tests.mocks + class MockBlob: + def __init__(self): + self.properties = BlobProperties( + name="Financial Market Analysis Report 2023-7.png", content_settings={"content_type": "image/png"} + ) +
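Usage sketch for the thoughts_contains_text helper introduced above (assuming that definition is in scope; the "title" keys are illustrative): each thought's description may be a plain string or a list of strings, and the helper substring-matches either shape.

thoughts = [
    {"title": "Search query", "description": "performance review"},
    {"title": "Prompt", "description": ["system: ...", "user: Is dental covered?"]},
]
assert thoughts_contains_text(thoughts, "performance review")  # string description
assert thoughts_contains_text(thoughts, "dental")              # list description
assert not thoughts_contains_text(thoughts, "missing phrase")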
tests.test_app/test_chat_with_long_history
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<22>:<add> assert not thoughts_contains_text(result["choices"][0]["context"]["thoughts"], "Is there a dress code?") <del> assert result["choices"][0]["context"]["thoughts"].find("Is there a dress code?") == -1
# module: tests.test_app @pytest.mark.asyncio async def test_chat_with_long_history(client, snapshot, caplog): <0> """This test makes sure that the history is truncated to max tokens minus 1024.""" <1> caplog.set_level(logging.DEBUG) <2> response = await client.post( <3> "/chat", <4> json={ <5> "messages": [ <6> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <7> { <8> "role": "assistant", <9> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]" <10> * 150, <11> }, # 3900 tokens <12> {"role": "user", "content": "What does a product manager do?"}, # 10 tokens <13> ], <14> "context": { <15> "overrides": {"retrieval_mode": "text"}, <16> }, <17> }, <18> ) <19> assert response.status_code == 200 <20> result = await response.get_json() <21> # Assert that it doesn't find the first message, since it wouldn't fit in the max tokens. <22> assert result["choices"][0]["context"]["thoughts"].find("Is there a dress code?") == -1 <23> assert "Reached max tokens" in caplog.text <24> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <25>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging DEBUG = 10 at: tests.test_app thoughts_contains_text(thoughts, text) at: tests.test_app.test_chat_with_history response = await client.post( "/chat", json={ "messages": [ {"content": "What happens in a performance review?", "role": "user"}, { "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", "role": "assistant", }, {"content": "Is dental covered?", "role": "user"}, ], "context": { "overrides": {"retrieval_mode": "text"}, }, }, ) ===========changed ref 0=========== # module: tests.test_app + def thoughts_contains_text(thoughts, text): + found = False + for thought in thoughts: + description = thought["description"] + if isinstance(description, str) and text in description: + found = True + break + elif isinstance(description, list) and any(text in item for item in description): + found = True + break + return found + ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_with_history(client, snapshot): response = await client.post( "/chat", json={ "messages": [ {"content": "What happens in a performance review?", "role": "user"}, { "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. 
A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", "role": "assistant", }, {"content": "Is dental covered?", "role": "user"}, ], "context": { "overrides": {"retrieval_mode": "text"}, }, }, ) assert response.status_code == 200 result = await response.get_json() + assert thoughts_contains_text(result["choices"][0]["context"]["thoughts"], "performance review") - assert result["choices"][0]["context"]["thoughts"].find("performance review") != -1 snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 3=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 4=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 5=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 6=========== + # module: tests + + ===========changed ref 7=========== + # module: app.backend.core.imageshelper + + ===========changed ref 8=========== + # module: tests.mocks + + ===========changed ref 9=========== # module: tests.test_content_file - - ===========changed ref 10=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 11=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 12=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 13=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 14=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 15=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 17=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 18=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 19=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 20=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 21=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 22=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 23=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 24=========== + # module: tests.mocks + class MockResponse: + def json(self): + return 
json.loads(self.text) + ===========changed ref 25=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 26=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 27=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
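The truncation behavior this test exercises can be sketched as a newest-first token budget: walk the history backwards and stop once the budget (max model tokens minus the 1024 reserved for the answer) runs out. The word-split token counter below is a placeholder, not the repo's exact token accounting:

def truncate_history(history, max_tokens, reserved_for_answer=1024,
                     count_tokens=lambda m: len(m["content"].split())):
    budget = max_tokens - reserved_for_answer
    kept = []
    for message in reversed(history):   # newest messages first
        budget -= count_tokens(message)
        if budget < 0:
            break
        kept.append(message)
    return list(reversed(kept))         # restore chronological order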
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<1>:<add> self.chatgpt_deployment = chatgpt_deployment
<s>_client: SearchClient, openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): <0> self.search_client = search_client <1> self.openai_client = openai_client <2> self.chatgpt_model = chatgpt_model <3> self.embedding_model = embedding_model <4> self.chatgpt_deployment = chatgpt_deployment <5> self.embedding_deployment = embedding_deployment <6> self.sourcepage_field = sourcepage_field <7> self.content_field = content_field <8> self.query_language = query_language <9> self.query_speller = query_speller <10>
===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) ===========changed ref 0=========== + # module: app.backend.approaches.chatreadretrievereadvision + + ===========changed ref 1=========== + # module: app.backend.approaches.chatapproach + + ===========changed ref 2=========== + # module: tests.test_chatvisionapproach + + ===========changed ref 3=========== + # module: app.backend.approaches.retrievethenreadvision + + ===========changed ref 4=========== + # module: tests + + ===========changed ref 5=========== + # module: app.backend.core.imageshelper + + ===========changed ref 6=========== + # module: tests.mocks + + ===========changed ref 7=========== # module: tests.test_content_file - - ===========changed ref 8=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass + ===========changed ref 9=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass + ===========changed ref 10=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass + ===========changed ref 11=========== + # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 12=========== + # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 13=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self + ===========changed ref 14=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed 
ref 15=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self + ===========changed ref 16=========== + # module: tests.mocks + class MockResponse: + def text(self): + return self._text + ===========changed ref 17=========== + # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() + ===========changed ref 18=========== + # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data + ===========changed ref 19=========== + # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value + ===========changed ref 20=========== + # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() + ===========changed ref 21=========== + # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self + ===========changed ref 22=========== + # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 23=========== + # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") + ===========changed ref 24=========== + # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") + ===========changed ref 25=========== # module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) - ===========changed ref 26=========== + # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 27=========== # module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) - ===========changed ref 28=========== + # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) + ===========changed ref 29=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose + ===========changed ref 30=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) - ===========changed ref 31=========== # module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") + ===========changed ref 32=========== + # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 33=========== + # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 34=========== + # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or 
{} + ===========changed ref 35=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" +
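The blob_image_name_from_file_page classmethod added alongside this change (see the changed refs above) derives a per-page PNG name from a source path; replicating its expression:

import os

def blob_image_name_from_file_page(filename, page=0):
    # Same expression as the BlobManager classmethod shown above
    return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"

assert blob_image_name_from_file_page("data/Financial Report.pdf", 7) == "Financial Report-7.png"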
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<5>:<add> use_semantic_ranker = overrides.get("semantic_ranker") and has_text <add> <11>:<del> embedding = await self.openai_client.embeddings.create( <12>:<del> # Azure Open AI takes the deployment name as the model name <13>:<del> model=self.embedding_deployment if self.embedding_deployment else self.embedding_model, <14>:<del> input=q, <15>:<del> ) <16>:<del> query_vector = embedding.data[0].embedding <17>:<del> vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding")) <18>:<add> vectors.append(await self.compute_text_embedding(q)) <20>:<add> query_text = q if has_text else None <del> query_text = q if has_text else "" <22>:<del> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <23>:<del> if overrides.get("semantic_ranker") and has_text: <24>:<del> r = await self.search_client.search( <25>:<del> query_text, <26>:<del> filter=filter,
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <6> top = overrides.get("top", 3) <7> filter = self.build_filter(overrides, auth_claims) <8> # If retrieval mode includes vectors, compute an embedding for the query <9> vectors: list[VectorQuery] = [] <10> if has_vector: <11> embedding = await self.openai_client.embeddings.create( <12> # Azure Open AI takes the deployment name as the model name <13> model=self.embedding_deployment if self.embedding_deployment else self.embedding_model, <14> input=q, <15> ) <16> query_vector = embedding.data[0].embedding <17> vectors.append(RawVectorQuery(vector=query_vector, k=50, fields="embedding")) <18> <19> # Only keep the text query if the retrieval mode uses text, otherwise drop it <20> query_text = q if has_text else "" <21> <22> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <23> if overrides.get("semantic_ranker") and has_text: <24> r = await self.search_client.search( <25> query_text, <26> filter=filter, </s>
===========below chunk 0===========
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[dict],
        stream: bool = False,  # Stream is not used in this approach
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
    # offset: 1
                query_language=self.query_language,
                query_speller=self.query_speller,
                semantic_configuration_name="default",
                top=top,
                query_caption="extractive|highlight-false" if use_semantic_captions else None,
                vector_queries=vectors,
            )
        else:
            r = await self.search_client.search(
                query_text,
                filter=filter,
                top=top,
                vector_queries=vectors,
            )
        if use_semantic_captions:
            results = [
                doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
                async for doc in r
            ]
        else:
            results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
        content = "\n".join(results)
        message_builder = MessageBuilder(
            overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model
        )
        # add user question
        user_content = q + "\n" + f"Sources:\n {content}"
        message_builder.insert_message("user", user_content)
        # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message.
        message_builder.insert_message("assistant", self.answer)
        message_builder.insert_message("user", self.question)
        chat_completion = (
            await self.openai</s>
===========below chunk 1===========
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[dict],
        stream: bool = False,  # Stream is not used in this approach
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
    # offset: 2
    <s> message_builder.insert_message("user", self.question)
        chat_completion = (
            await self.openai_client.chat.completions.create(
                # Azure Open AI takes the deployment name as the model name
                model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,
                messages=message_builder.messages,
                temperature=overrides.get("temperature") or 0.3,
                max_tokens=1024,
                n=1,
            )
        ).model_dump()
        extra_info = {
            "data_points": results,
            "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>"
            + "\n\n".join([str(message) for message in message_builder.messages]),
        }
        chat_completion["choices"][0]["context"] = extra_info
        chat_completion["choices"][0]["session_state"] = session_state
        return chat_completion
===========unchanged ref 0===========
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach
system_chat_template = (
    "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. "
    + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. "
    + "Answer the following question using only the data provided in the sources below. "
    + "For tabular information return it as an html table. Do not return markdown format. "
    + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. "
    + "If you cannot answer using the sources below, say you don't know. Use below example to answer"
)
question = """
'What is the deductible for the employee plan for a visit to Overlake in Bellevue?'
Sources:
info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family.
info2.pdf: Overlake is in-network for the employee plan.
info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue.
info4.pdf: In-network institutions include Overlake, Swedish and others in the region
"""
answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]."
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__
self.openai_client = openai_client
self.chatgpt_model = chatgpt_model
self.chatgpt_deployment = chatgpt_deployment
===========unchanged ref 1===========
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.query_language = query_language
self.query_speller = query_speller
at: approaches.approach.Approach
build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_semantic_ranker: bool, use_semantic_captions: bool) -> List[Document]
get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str]
compute_text_embedding(q: str)
run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]
at: core.messagebuilder
MessageBuilder(system_content: str, chatgpt_model: str)
at: core.messagebuilder.MessageBuilder
insert_message(role: str, content: Union[str, List[ChatCompletionContentPartParam]], index: int=1)
at: core.messagebuilder.MessageBuilder.__init__
self.messages: list[ChatCompletionMessageParam] = [
    ChatCompletionSystemMessageParam(role="system", content=unicodedata.normalize("NFC", system_content))
]
at: typing
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
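===========editor example 0===========
The diff above replaces the inline embedding call plus RawVectorQuery construction with a shared compute_text_embedding helper. A minimal, self-contained sketch of the original hybrid (text + vector) retrieval pattern, assuming the azure-search-documents beta that exports RawVectorQuery (the flavor this code imports) and an AsyncOpenAI-compatible client; all names here are illustrative:

from azure.search.documents.aio import SearchClient
from azure.search.documents.models import RawVectorQuery

async def hybrid_search(search_client: SearchClient, openai_client, q: str, embed_model: str, top: int = 3):
    # Embed the query text, then send text and vector together for hybrid ranking
    embedding = await openai_client.embeddings.create(model=embed_model, input=q)
    vector = RawVectorQuery(vector=embedding.data[0].embedding, k=50, fields="embedding")
    results = await search_client.search(search_text=q, vector_queries=[vector], top=top)
    return [doc async for doc in results]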
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<7>:<add> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
<add> approach: Approach
<add> if use_gpt4v and CONFIG_ASK_VISION_APPROACH in current_app.config:
<add> approach = cast(Approach, current_app.config[CONFIG_ASK_VISION_APPROACH])
<add> else:
<add> approach = cast(Approach, current_app.config[CONFIG_ASK_APPROACH])
<del> approach = current_app.config[CONFIG_ASK_APPROACH]
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
async def ask():
<0> if not request.is_json:
<1> return jsonify({"error": "request must be json"}), 415
<2> request_json = await request.get_json()
<3> context = request_json.get("context", {})
<4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
<6> try:
<7> approach = current_app.config[CONFIG_ASK_APPROACH]
<8> r = await approach.run(
<9> request_json["messages"], context=context, session_state=request_json.get("session_state")
<10> )
<11> return jsonify(r)
<12> except Exception as error:
<13> return error_response(error, "/ask")
<14>
===========unchanged ref 0===========
at: app.backend.app
CONFIG_ASK_APPROACH = "ask_approach"
CONFIG_AUTH_CLIENT = "auth_client"
bp = Blueprint("routes", __name__, static_folder="static")
error_response(error: Exception, route: str, status_code: int=500)
===========changed ref 0===========
# module: app.backend.app
+ CONFIG_OPENAI_TOKEN = "openai_token"
+ CONFIG_CREDENTIAL = "azure_credential"
CONFIG_ASK_APPROACH = "ask_approach"
+ CONFIG_ASK_VISION_APPROACH = "ask_vision_approach"
+ CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach"
CONFIG_CHAT_APPROACH = "chat_approach"
CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
CONFIG_AUTH_CLIENT = "auth_client"
+ CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed"
CONFIG_SEARCH_CLIENT = "search_client"
CONFIG_OPENAI_CLIENT = "openai_client"
ERROR_MESSAGE = """The app encountered an error processing your request.
If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.
Error type: {error_type}
"""
ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter."""
bp = Blueprint("routes", __name__, static_folder="static")
# Fix Windows registry issue with mimetypes
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
===========changed ref 1===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 2===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 3===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 4===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 5===========
+ # module: tests
+
+
===========changed ref 6===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 7===========
+ # module: tests.mocks
+
+
===========changed ref 8===========
# module: tests.test_content_file
-
-
===========changed ref 9===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 10===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 11===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 12===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 13===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 20===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 21===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 22===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 23===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 24===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
===========changed ref 25===========
+ # module: tests.mocks
+ class MockAzureCredential(AsyncTokenCredential):
+     def get_token(self, uri):
+         return MockToken("", 9999999999, "")
+
===========changed ref 26===========
# module: tests.test_content_file
- class MockAzureCredential:
-     def get_token(self, uri):
-         return MockToken("mock_token", 9999999999)
-
===========changed ref 27===========
+ # module: tests.mocks
+ class MockResponse:
+     def __init__(self, text, status):
+         self.text = text
+         self.status = status
+
===========changed ref 28===========
# module: tests.test_content_file
- MockToken = namedtuple("MockToken", ["token", "expires_on"])
-
===========changed ref 29===========
+ # module: tests.mocks
+ MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
+
===========changed ref 30===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def __init__(self, credential: str, endpoint: str, verbose: bool = False):
+         self.credential = credential
+         self.endpoint = endpoint
+         self.verbose = verbose
+
===========changed ref 31===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddingService(OpenAIEmbeddings):
-     def create_client(self) -> AsyncOpenAI:
-         return AsyncOpenAI(api_key=self.credential, organization=self.organization)
-
===========changed ref 32===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def before_retry_sleep(self, retry_state):
+         if self.verbose:
+             print("Rate limited on the Vision embeddings API, sleeping before retrying...")
+
===========changed ref 33===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     def extract_followup_questions(self, content: str):
+         return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
+
===========changed ref 34===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return MockAsyncPageIterator(self.data.pop(0))
+
===========changed ref 35===========
+ # module: tests.mocks
+ class MockCaption:
+     def __init__(self, text, highlights=None, additional_properties=None):
+         self.text = text
+         self.highlights = highlights or []
+         self.additional_properties = additional_properties or {}
+
===========changed ref 36===========
# module: scripts.prepdocslib.blobmanager
class BlobManager:
+     @classmethod
+     def blob_image_name_from_file_page(cls, filename, page=0) -> str:
+         return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"
+
===========changed ref 37===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return self.data.pop(0)  # This should be a list of dictionaries.
+
app.backend.app/format_as_ndjson
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<2>:<add> yield json.dumps(event, ensure_ascii=False, cls=JSONEncoder) + "\n"
<del> yield json.dumps(event, ensure_ascii=False) + "\n"
<3>:<add> except Exception as error:
<del> except Exception as e:
<4>:<add> logging.exception("Exception while generating response stream: %s", error)
<del> logging.exception("Exception while generating response stream: %s", e)
<5>:<add> yield json.dumps(error_dict(error))
<del> yield json.dumps(error_dict(e))
# module: app.backend.app
def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
<0> try:
<1> async for event in r:
<2> yield json.dumps(event, ensure_ascii=False) + "\n"
<3> except Exception as e:
<4> logging.exception("Exception while generating response stream: %s", e)
<5> yield json.dumps(error_dict(e))
<6>
===========unchanged ref 0===========
at: app.backend.app
error_dict(error: Exception) -> dict
at: json
dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str
at: logging
exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: typing
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
===========changed ref 0===========
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
async def ask():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    request_json = await request.get_json()
    context = request_json.get("context", {})
    auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
    context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
    try:
+         use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
+         approach: Approach
+         if use_gpt4v and CONFIG_ASK_VISION_APPROACH in current_app.config:
+             approach = cast(Approach, current_app.config[CONFIG_ASK_VISION_APPROACH])
+         else:
+             approach = cast(Approach, current_app.config[CONFIG_ASK_APPROACH])
-         approach = current_app.config[CONFIG_ASK_APPROACH]
        r = await approach.run(
            request_json["messages"], context=context, session_state=request_json.get("session_state")
        )
        return jsonify(r)
    except Exception as error:
        return error_response(error, "/ask")
===========changed ref 1===========
# module: app.backend.app
+ CONFIG_OPENAI_TOKEN = "openai_token"
+ CONFIG_CREDENTIAL = "azure_credential"
CONFIG_ASK_APPROACH = "ask_approach"
+ CONFIG_ASK_VISION_APPROACH = "ask_vision_approach"
+ CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach"
CONFIG_CHAT_APPROACH = "chat_approach"
CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
CONFIG_AUTH_CLIENT = "auth_client"
+ CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed"
CONFIG_SEARCH_CLIENT = "search_client"
CONFIG_OPENAI_CLIENT = "openai_client"
ERROR_MESSAGE = """The app encountered an error processing your request.
If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.
Error type: {error_type}
"""
ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter."""
bp = Blueprint("routes", __name__, static_folder="static")
# Fix Windows registry issue with mimetypes
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
===========changed ref 2===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 3===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 4===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 5===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 6===========
+ # module: tests
+
+
===========changed ref 7===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 8===========
+ # module: tests.mocks
+
+
===========changed ref 9===========
# module: tests.test_content_file
-
-
===========changed ref 10===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 11===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 12===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 13===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 20===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 21===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 22===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 23===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 24===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 25===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
===========changed ref 26===========
+ # module: tests.mocks
+ class MockAzureCredential(AsyncTokenCredential):
+     def get_token(self, uri):
+         return MockToken("", 9999999999, "")
+
===========changed ref 27===========
# module: tests.test_content_file
- class MockAzureCredential:
-     def get_token(self, uri):
-         return MockToken("mock_token", 9999999999)
-
===========changed ref 28===========
+ # module: tests.mocks
+ class MockResponse:
+     def __init__(self, text, status):
+         self.text = text
+         self.status = status
+
===========changed ref 29===========
# module: tests.test_content_file
- MockToken = namedtuple("MockToken", ["token", "expires_on"])
-
===========changed ref 30===========
+ # module: tests.mocks
+ MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
+
===========changed ref 31===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def __init__(self, credential: str, endpoint: str, verbose: bool = False):
+         self.credential = credential
+         self.endpoint = endpoint
+         self.verbose = verbose
+
===========changed ref 32===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddingService(OpenAIEmbeddings):
-     def create_client(self) -> AsyncOpenAI:
-         return AsyncOpenAI(api_key=self.credential, organization=self.organization)
-
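===========editor example 1===========
Why the new cls=JSONEncoder argument matters: the approaches can now emit dataclass instances inside streamed events, and json.dumps rejects those by default. A self-contained stdlib sketch of the pattern (the Document dataclass is an illustrative stand-in, not the backend's real type):

import dataclasses
import json

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        # Convert any dataclass into a plain dict before serialization
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        return super().default(o)

@dataclasses.dataclass
class Document:
    id: str
    content: str

event = {"context": {"data_points": [Document(id="1", content="hello")]}}
print(json.dumps(event, ensure_ascii=False, cls=JSONEncoder))  # one NDJSON line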
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<6>:<add>
<7>:<add> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
<add> approach: Approach
<add> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
<add> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH])
<add> else:
<add> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH])
<del> approach = current_app.config[CONFIG_CHAT_APPROACH]
<8>:<add>
# module: app.backend.app
@bp.route("/chat", methods=["POST"])
async def chat():
<0> if not request.is_json:
<1> return jsonify({"error": "request must be json"}), 415
<2> request_json = await request.get_json()
<3> context = request_json.get("context", {})
<4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
<6> try:
<7> approach = current_app.config[CONFIG_CHAT_APPROACH]
<8> result = await approach.run(
<9> request_json["messages"],
<10> stream=request_json.get("stream", False),
<11> context=context,
<12> session_state=request_json.get("session_state"),
<13> )
<14> if isinstance(result, dict):
<15> return jsonify(result)
<16> else:
<17> response = await make_response(format_as_ndjson(result))
<18> response.timeout = None  # type: ignore
<19> response.mimetype = "application/json-lines"
<20> return response
<21> except Exception as error:
<22> return error_response(error, "/chat")
<23>
===========unchanged ref 0===========
at: app.backend.app
CONFIG_CHAT_APPROACH = "chat_approach"
CONFIG_AUTH_CLIENT = "auth_client"
bp = Blueprint("routes", __name__, static_folder="static")
error_response(error: Exception, route: str, status_code: int=500)
format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]
===========changed ref 0===========
# module: app.backend.app
def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    try:
        async for event in r:
+             yield json.dumps(event, ensure_ascii=False, cls=JSONEncoder) + "\n"
-             yield json.dumps(event, ensure_ascii=False) + "\n"
+     except Exception as error:
-     except Exception as e:
+         logging.exception("Exception while generating response stream: %s", error)
-         logging.exception("Exception while generating response stream: %s", e)
+         yield json.dumps(error_dict(error))
-         yield json.dumps(error_dict(e))
===========changed ref 1===========
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
async def ask():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    request_json = await request.get_json()
    context = request_json.get("context", {})
    auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
    context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
    try:
+         use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
+         approach: Approach
+         if use_gpt4v and CONFIG_ASK_VISION_APPROACH in current_app.config:
+             approach = cast(Approach, current_app.config[CONFIG_ASK_VISION_APPROACH])
+         else:
+             approach = cast(Approach, current_app.config[CONFIG_ASK_APPROACH])
-         approach = current_app.config[CONFIG_ASK_APPROACH]
        r = await approach.run(
            request_json["messages"], context=context, session_state=request_json.get("session_state")
        )
        return jsonify(r)
    except Exception as error:
        return error_response(error, "/ask")
===========changed ref 2===========
# module: app.backend.app
+ CONFIG_OPENAI_TOKEN = "openai_token"
+ CONFIG_CREDENTIAL = "azure_credential"
CONFIG_ASK_APPROACH = "ask_approach"
+ CONFIG_ASK_VISION_APPROACH = "ask_vision_approach"
+ CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach"
CONFIG_CHAT_APPROACH = "chat_approach"
CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
CONFIG_AUTH_CLIENT = "auth_client"
+ CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed"
CONFIG_SEARCH_CLIENT = "search_client"
CONFIG_OPENAI_CLIENT = "openai_client"
ERROR_MESSAGE = """The app encountered an error processing your request.
If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.
Error type: {error_type}
"""
ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter."""
bp = Blueprint("routes", __name__, static_folder="static")
# Fix Windows registry issue with mimetypes
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
===========changed ref 3===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 4===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 5===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 6===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 7===========
+ # module: tests
+
+
===========changed ref 8===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 9===========
+ # module: tests.mocks
+
+
===========changed ref 10===========
# module: tests.test_content_file
-
-
===========changed ref 11===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 12===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 13===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 20===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 21===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 22===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 23===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 24===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 25===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 26===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
===========changed ref 27===========
+ # module: tests.mocks
+ class MockAzureCredential(AsyncTokenCredential):
+     def get_token(self, uri):
+         return MockToken("", 9999999999, "")
+
===========changed ref 28===========
# module: tests.test_content_file
- class MockAzureCredential:
-     def get_token(self, uri):
-         return MockToken("mock_token", 9999999999)
-
===========changed ref 29===========
+ # module: tests.mocks
+ class MockResponse:
+     def __init__(self, text, status):
+         self.text = text
+         self.status = status
+
===========changed ref 30===========
# module: tests.test_content_file
- MockToken = namedtuple("MockToken", ["token", "expires_on"])
-
===========changed ref 31===========
+ # module: tests.mocks
+ MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
+
===========changed ref 32===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def __init__(self, credential: str, endpoint: str, verbose: bool = False):
+         self.credential = credential
+         self.endpoint = endpoint
+         self.verbose = verbose
+
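===========editor example 2===========
Both /ask and /chat now select the vision-capable approach only when the client opts in AND that approach was registered at startup. A minimal sketch of the routing rule, assuming a plain dict standing in for current_app.config (the keys match the constants above; names are illustrative):

def pick_approach(config: dict, context: dict, vision_key: str, default_key: str):
    use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
    if use_gpt4v and vision_key in config:
        return config[vision_key]  # GPT-4 Vision approach, only if it was deployed
    return config[default_key]

# e.g. pick_approach(app_config, {"overrides": {"use_gpt4v": True}},
#                    "chat_vision_approach", "chat_approach")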
scripts.prepdocs/setup_file_strategy
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<5>:<add> store_page_images=args.searchimages,
# module: scripts.prepdocs
+ def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
- def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
<0> storage_creds = credential if is_key_empty(args.storagekey) else args.storagekey
<1> blob_manager = BlobManager(
<2> endpoint=f"https://{args.storageaccount}.blob.core.windows.net",
<3> container=args.container,
<4> credential=storage_creds,
<5> verbose=args.verbose,
<6> )
<7>
<8> pdf_parser: PdfParser
<9> if args.localpdfparser:
<10> pdf_parser = LocalPdfParser()
<11> else:
<12> # check if Azure Document Intelligence credentials are provided
<13> if args.formrecognizerservice is None:
<14> print(
<15> "Error: Azure Document Intelligence service is not provided. Please provide --formrecognizerservice or use --localpdfparser for local pypdf parser."
<16> )
<17> exit(1)
<18> formrecognizer_creds: Union[AsyncTokenCredential, AzureKeyCredential] = (
<19> credential if is_key_empty(args.formrecognizerkey) else AzureKeyCredential(args.formrecognizerkey)
<20> )
<21> pdf_parser = DocumentAnalysisPdfParser(
<22> endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/",
<23> credential=formrecognizer_creds,
<24> verbose=args.verbose,
<25> )
<26>
<27> use_vectors = not args.novectors
<28> embeddings: Optional[OpenAIEmbeddings] = None
<29> if use_vectors and args.openaihost != "openai":
<30> azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
<31> credential if is_key_empty(args.openaikey) else AzureKeyCredential(args.openaikey)
<32> )
<33> embeddings = AzureOpenAIEmbeddingService(
<34> open_ai_</s>
===========below chunk 0===========
# module: scripts.prepdocs
+ def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
- def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
    # offset: 1
            open_ai_deployment=args.openaideployment,
            open_ai_model_name=args.openaimodelname,
            credential=azure_open_ai_credential,
            disable_batch=args.disablebatchvectors,
            verbose=args.verbose,
        )
    elif use_vectors:
        embeddings = OpenAIEmbeddingService(
            open_ai_model_name=args.openaimodelname,
            credential=args.openaikey,
            organization=args.openaiorg,
            disable_batch=args.disablebatchvectors,
            verbose=args.verbose,
        )
    print("Processing files...")
    list_file_strategy: ListFileStrategy
    if args.datalakestorageaccount:
        adls_gen2_creds = credential if is_key_empty(args.datalakekey) else args.datalakekey
        print(f"Using Data Lake Gen2 Storage Account {args.datalakestorageaccount}")
        list_file_strategy = ADLSGen2ListFileStrategy(
            data_lake_storage_account=args.datalakestorageaccount,
            data_lake_filesystem=args.datalakefilesystem,
            data_lake_path=args.datalakepath,
            credential=adls_gen2_creds,
            verbose=args.verbose,
        )
    else:
        print(f"Using local files in {args.files}")
        list_file_strategy = LocalListFileStrategy(path_pattern=args.files, verbose=args.verbose)
    if args.removeall:
        document_action = DocumentAction.RemoveAll
    elif args.remove:
        document_action = DocumentAction.Remove
    else:
        document_action = DocumentAction.Add
    return FileStrategy(
        list_file_strategy=list_file_strategy, blob_manager=blob_manager</s>
===========below chunk 1===========
# module: scripts.prepdocs
+ def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
- def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy:
    # offset: 2
    <s> return FileStrategy(
        list_file_strategy=list_file_strategy,
        blob_manager=blob_manager,
        pdf_parser=pdf_parser,
        text_splitter=TextSplitter(),
        document_action=document_action,
        embeddings=embeddings,
        search_analyzer_name=args.searchanalyzername,
        use_acls=args.useacls,
        category=args.category,
    )
===========unchanged ref 0===========
at: prepdocslib.blobmanager
BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], store_page_images: bool=False, verbose: bool=False)
at: prepdocslib.embeddings
OpenAIEmbeddings(open_ai_model_name: str, disable_batch: bool=False, verbose: bool=False)
AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False, verbose: bool=False)
OpenAIEmbeddingService(open_ai_model_name: str, credential: str, organization: Optional[str]=None, disable_batch: bool=False, verbose: bool=False)
ImageEmbeddings(credential: str, endpoint: str, verbose: bool=False)
at: prepdocslib.filestrategy
FileStrategy(list_file_strategy: ListFileStrategy, blob_manager: BlobManager, pdf_parser: PdfParser, text_splitter: TextSplitter, document_action: DocumentAction=DocumentAction.Add, embeddings: Optional[OpenAIEmbeddings]=None, image_embeddings: Optional[ImageEmbeddings]=None, search_analyzer_name: Optional[str]=None, use_acls: bool=False, category: Optional[str]=None)
at: prepdocslib.listfilestrategy
ListFileStrategy()
ADLSGen2ListFileStrategy(data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False)
at: prepdocslib.pdfparser
PdfParser()
LocalPdfParser()
DocumentAnalysisPdfParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout", verbose: bool=False)
at: scripts.prepdocs
is_key_empty(key)
args = parser.parse_args()
===========changed ref 0===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 1===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 2===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 3===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 4===========
+ # module: tests
+
+
===========changed ref 5===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 6===========
+ # module: tests.mocks
+
+
===========changed ref 7===========
# module: tests.test_content_file
-
-
===========changed ref 8===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 9===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 10===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 11===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 12===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 13===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 20===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 21===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 22===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 23===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
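===========editor example 3===========
With store_page_images enabled, each PDF page is rendered to a PNG blob with a deterministic name; the naming rule is the BlobManager classmethod shown in the refs above. A quick stdlib check of that rule (the sample path is illustrative):

import os

def blob_image_name_from_file_page(filename, page=0) -> str:
    # Strip directory and extension, then append the page number
    return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"

assert blob_image_name_from_file_page("docs/Benefit_Options.pdf", 2) == "Benefit_Options-2.png"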
scripts.prepdocslib.filestrategy/FileStrategy.__init__
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<6>:<add> self.image_embeddings = image_embeddings
<s> list_file_strategy: ListFileStrategy,
    blob_manager: BlobManager,
    pdf_parser: PdfParser,
    text_splitter: TextSplitter,
    document_action: DocumentAction = DocumentAction.Add,
    embeddings: Optional[OpenAIEmbeddings] = None,
+     image_embeddings: Optional[ImageEmbeddings] = None,
    search_analyzer_name: Optional[str] = None,
    use_acls: bool = False,
    category: Optional[str] = None,
):
<0> self.list_file_strategy = list_file_strategy
<1> self.blob_manager = blob_manager
<2> self.pdf_parser = pdf_parser
<3> self.text_splitter = text_splitter
<4> self.document_action = document_action
<5> self.embeddings = embeddings
<6> self.search_analyzer_name = search_analyzer_name
<7> self.use_acls = use_acls
<8> self.category = category
===========unchanged ref 0===========
at: scripts.prepdocslib.blobmanager
BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], store_page_images: bool=False, verbose: bool=False)
at: scripts.prepdocslib.embeddings
OpenAIEmbeddings(open_ai_model_name: str, disable_batch: bool=False, verbose: bool=False)
ImageEmbeddings(credential: str, endpoint: str, verbose: bool=False)
at: scripts.prepdocslib.filestrategy
DocumentAction()
at: scripts.prepdocslib.listfilestrategy
ListFileStrategy()
at: scripts.prepdocslib.pdfparser
PdfParser()
at: scripts.prepdocslib.textsplitter
TextSplitter(has_image_embeddings, verbose: bool=False)
===========changed ref 0===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     """
+     Class for using image embeddings from Azure AI Vision
+     To learn more, please visit https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/image-retrieval#call-the-vectorize-image-api
+     """
+
===========changed ref 1===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 2===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 3===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 4===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 5===========
+ # module: tests
+
+
===========changed ref 6===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 7===========
+ # module: tests.mocks
+
+
===========changed ref 8===========
# module: tests.test_content_file
-
-
===========changed ref 9===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 10===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 11===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 12===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 13===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 20===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 21===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 22===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 23===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 24===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
===========changed ref 25===========
+ # module: tests.mocks
+ class MockAzureCredential(AsyncTokenCredential):
+     def get_token(self, uri):
+         return MockToken("", 9999999999, "")
+
===========changed ref 26===========
# module: tests.test_content_file
- class MockAzureCredential:
-     def get_token(self, uri):
-         return MockToken("mock_token", 9999999999)
-
===========changed ref 27===========
+ # module: tests.mocks
+ class MockResponse:
+     def __init__(self, text, status):
+         self.text = text
+         self.status = status
+
===========changed ref 28===========
# module: tests.test_content_file
- MockToken = namedtuple("MockToken", ["token", "expires_on"])
-
===========changed ref 29===========
+ # module: tests.mocks
+ MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
+
===========changed ref 30===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def __init__(self, credential: str, endpoint: str, verbose: bool = False):
+         self.credential = credential
+         self.endpoint = endpoint
+         self.verbose = verbose
+
===========changed ref 31===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddingService(OpenAIEmbeddings):
-     def create_client(self) -> AsyncOpenAI:
-         return AsyncOpenAI(api_key=self.credential, organization=self.organization)
-
===========changed ref 32===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def before_retry_sleep(self, retry_state):
+         if self.verbose:
+             print("Rate limited on the Vision embeddings API, sleeping before retrying...")
+
===========changed ref 33===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     def extract_followup_questions(self, content: str):
+         return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
+
===========changed ref 34===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return MockAsyncPageIterator(self.data.pop(0))
+
===========changed ref 35===========
# module: app.backend.app
+ @bp.route("/config", methods=["GET"])
+ def config():
+     return jsonify({"showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED]})
+
===========changed ref 36===========
+ # module: tests.mocks
+ class MockCaption:
+     def __init__(self, text, highlights=None, additional_properties=None):
+         self.text = text
+         self.highlights = highlights or []
+         self.additional_properties = additional_properties or {}
+
===========changed ref 37===========
# module: scripts.prepdocslib.blobmanager
class BlobManager:
+     @classmethod
+     def blob_image_name_from_file_page(cls, filename, page=0) -> str:
+         return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"
+
===========changed ref 38===========
# module: app.backend.app
+ class JSONEncoder(json.JSONEncoder):
+     def default(self, o):
+         if dataclasses.is_dataclass(o):
+             return dataclasses.asdict(o)
+         return super().default(o)
+
===========changed ref 39===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return self.data.pop(0)  # This should be a list of dictionaries.
+
scripts.prepdocslib.filestrategy/FileStrategy.setup
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<0>:<add> search_manager = SearchManager(
<add> search_info,
<add> self.search_analyzer_name,
<add> self.use_acls,
<add> self.embeddings,
<add> search_images=self.image_embeddings is not None,
<add> )
<del> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, self.embeddings)
# module: scripts.prepdocslib.filestrategy
class FileStrategy(Strategy):
    def setup(self, search_info: SearchInfo):
<0> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, self.embeddings)
<1> await search_manager.create_index()
===========unchanged ref 0===========
at: scripts.prepdocslib.strategy
SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str, verbose: bool=False)
at: scripts.prepdocslib.strategy.Strategy
setup(self, search_info: SearchInfo)
===========changed ref 0===========
<s> list_file_strategy: ListFileStrategy,
    blob_manager: BlobManager,
    pdf_parser: PdfParser,
    text_splitter: TextSplitter,
    document_action: DocumentAction = DocumentAction.Add,
    embeddings: Optional[OpenAIEmbeddings] = None,
+     image_embeddings: Optional[ImageEmbeddings] = None,
    search_analyzer_name: Optional[str] = None,
    use_acls: bool = False,
    category: Optional[str] = None,
):
    self.list_file_strategy = list_file_strategy
    self.blob_manager = blob_manager
    self.pdf_parser = pdf_parser
    self.text_splitter = text_splitter
    self.document_action = document_action
    self.embeddings = embeddings
+     self.image_embeddings = image_embeddings
    self.search_analyzer_name = search_analyzer_name
    self.use_acls = use_acls
    self.category = category
===========changed ref 1===========
+ # module: app.backend.approaches.chatreadretrievereadvision
+
+
===========changed ref 2===========
+ # module: app.backend.approaches.chatapproach
+
+
===========changed ref 3===========
+ # module: tests.test_chatvisionapproach
+
+
===========changed ref 4===========
+ # module: app.backend.approaches.retrievethenreadvision
+
+
===========changed ref 5===========
+ # module: tests
+
+
===========changed ref 6===========
+ # module: app.backend.core.imageshelper
+
+
===========changed ref 7===========
+ # module: tests.mocks
+
+
===========changed ref 8===========
# module: tests.test_content_file
-
-
===========changed ref 9===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @abstractmethod
+     async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple:
+         pass
+
===========changed ref 10===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     @property
+     @abstractmethod
+     def system_message_chat_conversation(self) -> str:
+         pass
+
===========changed ref 11===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def create(self, *args, **kwargs):
+         pass
+
===========changed ref 12===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aexit__(self, exc_type, exc, tb):
+         pass
+
===========changed ref 13===========
+ # module: tests.mocks
+ class MockResponse:
+     def __aenter__(self):
+         return self
+
===========changed ref 14===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def by_page(self):
+         return self
+
===========changed ref 15===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 16===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __aiter__(self):
+         return self
+
===========changed ref 17===========
+ # module: tests.mocks
+ class MockResponse:
+     def text(self):
+         return self._text
+
===========changed ref 18===========
+ # module: tests.mocks
+ class MockBlobClient:
+     def download_blob(self):
+         return MockBlob()
+
===========changed ref 19===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __init__(self, data):
+         self.data = data
+
===========changed ref 20===========
+ # module: tests.mocks
+ class MockKeyVaultSecret:
+     def __init__(self, value):
+         self.value = value
+
===========changed ref 21===========
+ # module: tests.test_chatvisionapproach
+ @pytest.fixture
+ def openai_client():
+     return MockOpenAIClient()
+
===========changed ref 22===========
+ # module: tests.test_chatvisionapproach
+ class MockOpenAIClient:
+     def __init__(self):
+         self.embeddings = self
+
===========changed ref 23===========
+ # module: tests.mocks
+ class MockResponse:
+     def json(self):
+         return json.loads(self.text)
+
===========changed ref 24===========
+ # module: tests.mocks
+ class MockKeyVaultSecretClient:
+     def get_secret(self, secret_name):
+         return MockKeyVaultSecret("mysecret")
+
===========changed ref 25===========
+ # module: tests.mocks
+ class MockAzureCredential(AsyncTokenCredential):
+     def get_token(self, uri):
+         return MockToken("", 9999999999, "")
+
===========changed ref 26===========
# module: tests.test_content_file
- class MockAzureCredential:
-     def get_token(self, uri):
-         return MockToken("mock_token", 9999999999)
-
===========changed ref 27===========
+ # module: tests.mocks
+ class MockResponse:
+     def __init__(self, text, status):
+         self.text = text
+         self.status = status
+
===========changed ref 28===========
# module: tests.test_content_file
- MockToken = namedtuple("MockToken", ["token", "expires_on"])
-
===========changed ref 29===========
+ # module: tests.mocks
+ MockToken = namedtuple("MockToken", ["token", "expires_on", "value"])
+
===========changed ref 30===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def __init__(self, credential: str, endpoint: str, verbose: bool = False):
+         self.credential = credential
+         self.endpoint = endpoint
+         self.verbose = verbose
+
===========changed ref 31===========
# module: scripts.prepdocslib.embeddings
class OpenAIEmbeddingService(OpenAIEmbeddings):
-     def create_client(self) -> AsyncOpenAI:
-         return AsyncOpenAI(api_key=self.credential, organization=self.organization)
-
===========changed ref 32===========
# module: scripts.prepdocslib.embeddings
+ class ImageEmbeddings:
+     def before_retry_sleep(self, retry_state):
+         if self.verbose:
+             print("Rate limited on the Vision embeddings API, sleeping before retrying...")
+
===========changed ref 33===========
+ # module: app.backend.approaches.chatapproach
+ class ChatApproach(Approach, ABC):
+     def extract_followup_questions(self, content: str):
+         return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
+
===========changed ref 34===========
+ # module: tests.mocks
+ class MockAsyncSearchResultsIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return MockAsyncPageIterator(self.data.pop(0))
+
===========changed ref 35===========
# module: app.backend.app
+ @bp.route("/config", methods=["GET"])
+ def config():
+     return jsonify({"showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED]})
+
===========changed ref 36===========
+ # module: tests.mocks
+ class MockCaption:
+     def __init__(self, text, highlights=None, additional_properties=None):
+         self.text = text
+         self.highlights = highlights or []
+         self.additional_properties = additional_properties or {}
+
===========changed ref 37===========
# module: scripts.prepdocslib.blobmanager
class BlobManager:
+     @classmethod
+     def blob_image_name_from_file_page(cls, filename, page=0) -> str:
+         return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png"
+
===========changed ref 38===========
# module: app.backend.app
+ class JSONEncoder(json.JSONEncoder):
+     def default(self, o):
+         if dataclasses.is_dataclass(o):
+             return dataclasses.asdict(o)
+         return super().default(o)
+
===========changed ref 39===========
+ # module: tests.mocks
+ class MockAsyncPageIterator:
+     def __anext__(self):
+         if not self.data:
+             raise StopAsyncIteration
+         return self.data.pop(0)  # This should be a list of dictionaries.
+
scripts.prepdocslib.filestrategy/FileStrategy.run
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<12>:<add> image_embeddings: Optional[List[List[float]]]
<del> await search_manager.update_content(sections)
<13>:<add> blob_sas_uris = await self.blob_manager.upload_blob(file)
<del> await self.blob_manager.upload_blob(file)
<14>:<add> if self.image_embeddings and blob_sas_uris:
<add> image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris)
<add> await search_manager.update_content(sections, image_embeddings)
# module: scripts.prepdocslib.filestrategy
class FileStrategy(Strategy):
    def run(self, search_info: SearchInfo):
<0> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, self.embeddings)
<1> if self.document_action == DocumentAction.Add:
<2> files = self.list_file_strategy.list()
<3> async for file in files:
<4> try:
<5> pages = [page async for page in self.pdf_parser.parse(content=file.content)]
<6> if search_info.verbose:
<7> print(f"Splitting '{file.filename()}' into sections")
<8> sections = [
<9> Section(split_page, content=file, category=self.category)
<10> for split_page in self.text_splitter.split_pages(pages)
<11> ]
<12> await search_manager.update_content(sections)
<13> await self.blob_manager.upload_blob(file)
<14> finally:
<15> if file:
<16> file.close()
<17> elif self.document_action == DocumentAction.Remove:
<18> paths = self.list_file_strategy.list_paths()
<19> async for path in paths:
<20> await self.blob_manager.remove_blob(path)
<21> await search_manager.remove_content(path)
<22> elif self.document_action == DocumentAction.RemoveAll:
<23> await self.blob_manager.remove_blob()
<24> await search_manager.remove_content()
===========unchanged ref 0===========
at: scripts.prepdocslib.blobmanager.BlobManager
    upload_blob(file: File) -> Optional[List[str]]
at: scripts.prepdocslib.embeddings.ImageEmbeddings
    create_embeddings(blob_urls: List[str]) -> List[List[float]]
at: scripts.prepdocslib.filestrategy
    DocumentAction()
at: scripts.prepdocslib.filestrategy.FileStrategy.__init__
    self.list_file_strategy = list_file_strategy
    self.blob_manager = blob_manager
    self.pdf_parser = pdf_parser
    self.text_splitter = text_splitter
    self.document_action = document_action
    self.embeddings = embeddings
    self.image_embeddings = image_embeddings
    self.search_analyzer_name = search_analyzer_name
    self.use_acls = use_acls
    self.category = category
at: scripts.prepdocslib.filestrategy.FileStrategy.setup
    search_manager = SearchManager( search_info, self.search_analyzer_name, self.use_acls, self.embeddings, search_images=self.image_embeddings is not None, )
at: scripts.prepdocslib.listfilestrategy.ListFileStrategy
    list() -> AsyncGenerator[File, None]
at: scripts.prepdocslib.pdfparser.PdfParser
    parse(content: IO) -> AsyncGenerator[Page, None]
at: scripts.prepdocslib.searchmanager
    Section(split_page: SplitPage, content: File, category: Optional[str]=None)
    SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False)
at: scripts.prepdocslib.searchmanager.SearchManager
    create_index()
===========unchanged ref 1===========
    update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None)
at: scripts.prepdocslib.strategy
    SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str, verbose: bool=False)
at: scripts.prepdocslib.strategy.SearchInfo.__init__
    self.verbose = verbose
at: scripts.prepdocslib.strategy.Strategy
    run(self, search_info: SearchInfo)
at: scripts.prepdocslib.textsplitter.TextSplitter
    split_pages(pages: List[Page]) -> Generator[SplitPage, None, None]
at: typing
    List = _alias(list, 1, inst=False, name='List')
===========changed ref 0===========
 # module: scripts.prepdocslib.embeddings
+class ImageEmbeddings:
+    def create_embeddings(self, blob_urls: List[str]) -> List[List[float]]:
+        headers = {"Ocp-Apim-Subscription-Key": self.credential}
+        params = {"api-version": "2023-02-01-preview", "modelVersion": "latest"}
+        endpoint = urljoin(self.endpoint, "computervision/retrieval:vectorizeImage")
+        embeddings: List[List[float]] = []
+        async with aiohttp.ClientSession(headers=headers) as session:
+            for blob_url in blob_urls:
+                async for attempt in AsyncRetrying(
+                    retry=retry_if_exception_type(Exception),
+                    wait=wait_random_exponential(min=15, max=60),
+                    stop=stop_after_attempt(15),
+                    before_sleep=self.before_retry_sleep,
+                ):
+                    with attempt:
+                        body = {"url": blob_url}
+                        async with session.post(url=endpoint, params=params, json=body) as resp:
+                            resp_json = await resp.json()
+                            embeddings.append(resp_json["vector"])
+
+        return embeddings
+
===========changed ref 1===========
 # module: scripts.prepdocslib.searchmanager
 class SearchManager:
+    def update_content(self, sections: List[Section], image_embeddings: Optional[List[List[float]]] = None):
-    def update_content(self, sections: List[Section]):
         MAX_BATCH_SIZE = 1000
         section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)]

         async with self.search_info.create_search_client() as search_client:
             for batch_index, batch in enumerate(section_batches):
                 documents = [
                     {
                         "id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}",
                         "content": section.split_page.text,
                         "category": section.category,
+                        "sourcepage": BlobManager.blob_image_name_from_file_page(
+                            filename=section.content.filename(), page=section.split_page.page_num
+                        )
+                        if image_embeddings
+                        else BlobManager.sourcepage_from_file_page(
-                        "sourcepage": BlobManager.sourcepage_from_file_page(
                             filename=section.content.filename(), page=section.split_page.page_num
                         ),
                         "sourcefile": section.content.filename(),
                         **section.content.acls,
                     }
                     for section_index, section in enumerate(batch)
                 ]
                 if self.embeddings:
                     embeddings = await self.embeddings.create_embeddings(
                         texts=[section.split_page.text for section in batch]
                     )
                     for i, document in enumerate(documents):
                         document["embedding"] = embeddings[i]
+                if image_embeddings:
+                    for i, (document, section) in enumerate(zip(documents, batch)):
+                        document["imageEmbedding"] = image_embeddings[section.split_page.page_num]

                 await search_client.upload_documents(documents)
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
# module: tests.conftest
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
<0>    class AsyncChatCompletionIterator:
<1>        def __init__(self, answer: str):
<2>            chunk_id = "test-id"
<3>            model = "gpt-35-turbo"
<4>            self.responses = [
<5>                {"object": "chat.completion.chunk", "choices": [], "id": chunk_id, "model": model, "created": 1},
<6>                {
<7>                    "object": "chat.completion.chunk",
<8>                    "choices": [{"delta": {"role": "assistant"}, "index": 0, "finish_reason": None}],
<9>                    "id": chunk_id,
<10>                    "model": model,
<11>                    "created": 1,
<12>                },
<13>            ]
<14>            # Split at << to simulate chunked responses
<15>            if answer.find("<<") > -1:
<16>                parts = answer.split("<<")
<17>                self.responses.append(
<18>                    {
<19>                        "object": "chat.completion.chunk",
<20>                        "choices": [
<21>                            {
<22>                                "delta": {"role": "assistant", "content": parts[0] + "<<"},
<23>                                "index": 0,
<24>                                "finish_reason": None,
<25>                            }
<26>                        ],
<27>                        "id": chunk_id,
<28>                        "model": model,
<29>                        "created": 1,
<30>                    }
<31>                )
<32>                self.responses.append(
<33>                    {
<34>                        "object": "chat.completion.chunk",
<35>                        "choices": [
<36>                            {"delta": {"role": "assistant", "content": parts[1]}, "index": 0, "finish_reason": None}
<37>                        ],
<38>                        "id": chunk_id,
<39>                        "model": model,
<40>                        "created": 1,
<41>                    }
<42>                )
<43>                self.responses.append(
<44>                    {
<45>                        "object": "chat.completion.chunk",
<46>                        "choices": [{"delta": {"role": None, "content":</s>
===========below chunk 0===========
# module: tests.conftest
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
# offset: 1
                    "id": chunk_id,
                    "model": model,
                    "created": 1,
                }
            )
        else:
            self.responses.append(
                {
                    "object": "chat.completion.chunk",
                    "choices": [{"delta": {"content": answer}, "index": 0, "finish_reason": None}],
                    "id": chunk_id,
                    "model": model,
                    "created": 1,
                }
            )

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.responses:
            return ChatCompletionChunk.model_validate(self.responses.pop(0))
        else:
            raise StopAsyncIteration

    async def mock_acreate(*args, **kwargs):
        messages = kwargs["messages"]
        if messages[-1]["content"] == "Generate search query for: What is the capital of France?":
            answer = "capital of France"
        else:
            answer = "The capital of France is Paris. [Benefit_Options-2.pdf]."
        if messages[0]["content"].find("Generate 3 very brief follow-up questions") > -1:
            answer = "The capital of France is Paris. [Benefit_Options-2.pdf]. <<What is the capital of Spain?>>"
        if "stream" in kwargs and kwargs["stream"] is True:
            return AsyncChatCompletionIterator(answer)
        else:
            return ChatCompletion(
                object="chat.completion",
                choices=[
                    Choice(
                        message=ChatCompletionMessage(role="assistant", content=answer), finish_reason="stop", index=0
                    )
                ],
                id="test-123",
                created=0,
                model="test-model",
            )

    def patch(openai_client):
        monkeypatch.setattr(openai_client.chat.completions, "create", mock_acreate)
</s>
===========below chunk 1===========
# module: tests.conftest
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
# offset: 2
<s>client):
        monkeypatch.setattr(openai_client.chat.completions, "create", mock_acreate)

    return patch
===========changed ref 0===========
# module: tests.conftest - class MockAzureCredential(AsyncTokenCredential): - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 1===========
# module: tests.conftest + @pytest.fixture + def mock_get_secret(monkeypatch): + monkeypatch.setattr(SecretClient, "get_secret", MockKeyVaultSecretClient().get_secret) +
===========changed ref 2===========
# module: tests.conftest - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 3===========
# module: tests.conftest + def mock_search(self, *args, **kwargs): + self.filter = kwargs.get("filter") + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) +
===========changed ref 4===========
# module: tests.conftest + @pytest.fixture + def mock_compute_embeddings_call(monkeypatch): + def mock_post(*args, **kwargs): + if kwargs.get("url").endswith("computervision/retrieval:vectorizeText"): + return mock_computervision_response() + else: + raise Exception("Unexpected URL for mock call to ClientSession.post()") + + monkeypatch.setattr(aiohttp.ClientSession, "post", mock_post) +
===========changed ref 5===========
+ # module: app.backend.approaches.chatreadretrievereadvision + +
===========changed ref 6===========
+ # module: app.backend.approaches.chatapproach + +
===========changed ref 7===========
+ # module: tests.test_chatvisionapproach + +
===========changed ref 8===========
+ # module: app.backend.approaches.retrievethenreadvision + +
===========changed ref 9===========
+ # module: tests + +
===========changed ref 10===========
+ # module: app.backend.core.imageshelper + +
===========changed ref 11===========
+ # module: tests.mocks + +
===========changed ref 12===========
# module: tests.test_content_file - -
===========changed ref 13===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass +
===========changed ref 14===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass +
===========changed ref 15===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass +
===========changed ref 16===========
+ # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass +
===========changed ref 17===========
+ # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self +
===========changed ref 18===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self +
===========changed ref 19===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self +
===========changed ref 20===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self +
===========changed ref 21===========
+ # module: tests.mocks + class MockResponse: + def text(self): + return self._text +
===========changed ref 22===========
+ # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() +
===========changed ref 23===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data +
===========changed ref 24===========
+ # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value +
===========changed ref 25===========
+ # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() +
===========changed ref 26===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self +
===========changed ref 27===========
+ # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) +
===========changed ref 28===========
+ # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") +
===========changed ref 29===========
+ # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") +
===========changed ref 30===========
# module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 31===========
+ # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status +
===========changed ref 32===========
# module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 33===========
+ # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) +
===========changed ref 34===========
# module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose +
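The fixture above streams canned ChatCompletion chunks through an async iterator. A stripped-down, runnable sketch of the same pattern, using plain dicts instead of the OpenAI chunk model:

    import asyncio

    class FakeChunkStream:
        # __anext__ pops pre-built chunks, which "async for" then consumes.
        def __init__(self, chunks):
            self._chunks = list(chunks)
        def __aiter__(self):
            return self
        async def __anext__(self):
            if not self._chunks:
                raise StopAsyncIteration
            return self._chunks.pop(0)

    async def main():
        stream = FakeChunkStream([{"content": "The capital "}, {"content": "is Paris."}])
        answer = ""
        async for chunk in stream:
            answer += chunk["content"]
        print(answer)

    asyncio.run(main())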
tests.conftest/mock_acs_search
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<0>:<del> class Caption:
<1>:<del>     def __init__(self, text):
<2>:<del>         self.text = text
<3>:<del> 
<4>:<del> class AsyncSearchResultsIterator:
<5>:<del>     def __init__(self):
<6>:<del>         self.num = 1
<7>:<del> 
<8>:<del>     def __aiter__(self):
<9>:<del>         return self
<10>:<del> 
<11>:<del>     async def __anext__(self):
<12>:<del>         if self.num == 1:
<13>:<del>             self.num = 0
<14>:<del>             return {
<15>:<del>                 "sourcepage": "Benefit_Options-2.pdf",
<16>:<del>                 "sourcefile": "Benefit_Options.pdf",
<17>:<del>                 "content": "There is a whistleblower policy.",
<18>:<del>                 "embeddings": [],
<19>:<del>                 "category": None,
<20>:<del>                 "id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2",
<21>:<del>                 "@search.score": 0.03279569745063782,
<22>:<del>                 "@search.reranker_score": 3.4577205181121826,
<23>:<del>                 "@search.highlights": None,
<24>:<del>                 "@search.captions": [Caption("Caption: A whistleblower policy.")],
<25>:<del>             }
<26>:<del>         else:
<27>:<del>             raise StopAsyncIteration
<28>:<del> 
<29>:<del> async def mock_search(*args, **kwargs):
<30>:<del>     return AsyncSearchResultsIterator()
<31>:<del> 
<32>:<add> monkeypatch.setattr(SearchClient, "search", mock_search)
# module: tests.conftest
@pytest.fixture
def mock_acs_search(monkeypatch):
<0>    class Caption:
<1>        def __init__(self, text):
<2>            self.text = text
<3>
<4>    class AsyncSearchResultsIterator:
<5>        def __init__(self):
<6>            self.num = 1
<7>
<8>        def __aiter__(self):
<9>            return self
<10>
<11>        async def __anext__(self):
<12>            if self.num == 1:
<13>                self.num = 0
<14>                return {
<15>                    "sourcepage": "Benefit_Options-2.pdf",
<16>                    "sourcefile": "Benefit_Options.pdf",
<17>                    "content": "There is a whistleblower policy.",
<18>                    "embeddings": [],
<19>                    "category": None,
<20>                    "id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2",
<21>                    "@search.score": 0.03279569745063782,
<22>                    "@search.reranker_score": 3.4577205181121826,
<23>                    "@search.highlights": None,
<24>                    "@search.captions": [Caption("Caption: A whistleblower policy.")],
<25>                }
<26>            else:
<27>                raise StopAsyncIteration
<28>
<29>    async def mock_search(*args, **kwargs):
<30>        return AsyncSearchResultsIterator()
<31>
<32>    monkeypatch.setattr(SearchClient, "search", mock_search)
<33>
===========changed ref 0===========
# module: tests.conftest - class MockAzureCredential(AsyncTokenCredential): - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 1===========
# module: tests.conftest + @pytest.fixture + def mock_get_secret(monkeypatch): + monkeypatch.setattr(SecretClient, "get_secret", MockKeyVaultSecretClient().get_secret) +
===========changed ref 2===========
# module: tests.conftest - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 3===========
# module: tests.conftest + def mock_search(self, *args, **kwargs): + self.filter = kwargs.get("filter") + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) +
===========changed ref 4===========
# module: tests.conftest + @pytest.fixture + def mock_compute_embeddings_call(monkeypatch): + def mock_post(*args, **kwargs): + if kwargs.get("url").endswith("computervision/retrieval:vectorizeText"): + return mock_computervision_response() + else: + raise Exception("Unexpected URL for mock call to ClientSession.post()") + + monkeypatch.setattr(aiohttp.ClientSession, "post", mock_post) +
===========changed ref 5===========
+ # module: app.backend.approaches.chatreadretrievereadvision + +
===========changed ref 6===========
+ # module: app.backend.approaches.chatapproach + +
===========changed ref 7===========
+ # module: tests.test_chatvisionapproach + +
===========changed ref 8===========
+ # module: app.backend.approaches.retrievethenreadvision + +
===========changed ref 9===========
+ # module: tests + +
===========changed ref 10===========
+ # module: app.backend.core.imageshelper + +
===========changed ref 11===========
+ # module: tests.mocks + +
===========changed ref 12===========
# module: tests.test_content_file - -
===========changed ref 13===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass +
===========changed ref 14===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass +
===========changed ref 15===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass +
===========changed ref 16===========
+ # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass +
===========changed ref 17===========
+ # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self +
===========changed ref 18===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self +
===========changed ref 19===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self +
===========changed ref 20===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self +
===========changed ref 21===========
+ # module: tests.mocks + class MockResponse: + def text(self): + return self._text +
===========changed ref 22===========
+ # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() +
===========changed ref 23===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data +
===========changed ref 24===========
+ # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value +
===========changed ref 25===========
+ # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() +
===========changed ref 26===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self +
===========changed ref 27===========
+ # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) +
===========changed ref 28===========
+ # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") +
===========changed ref 29===========
+ # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") +
===========changed ref 30===========
# module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 31===========
+ # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status +
===========changed ref 32===========
# module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 33===========
+ # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) +
===========changed ref 34===========
# module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose +
===========changed ref 35===========
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) -
===========changed ref 36===========
# module: app.backend.approaches.approach + @dataclass + class ThoughtStep: + title: str + description: Optional[Any] + props: Optional[dict[str, Any]] = None +
===========changed ref 37===========
# module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") +
===========changed ref 38===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) +
===========changed ref 39===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) +
===========changed ref 40===========
# module: app.backend.app + @bp.route("/config", methods=["GET"]) + def config(): + return jsonify({"showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED]}) +
===========changed ref 41===========
+ # module: tests.mocks + class MockCaption: + def __init__(self, text, highlights=None, additional_properties=None): + self.text = text + self.highlights = highlights or [] + self.additional_properties = additional_properties or {} +
===========changed ref 42===========
# module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_image_name_from_file_page(cls, filename, page=0) -> str: + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".png" +
===========changed ref 43===========
# module: app.backend.app + class JSONEncoder(json.JSONEncoder): + def default(self, o): + if dataclasses.is_dataclass(o): + return dataclasses.asdict(o) + return super().default(o) +
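The fixture above works by monkeypatching SearchClient.search at the class level, so every instance created later returns canned data. A tiny runnable pytest sketch of that technique, with FakeClient standing in for the SDK client:

    class FakeClient:
        def search(self, *args, **kwargs):
            raise RuntimeError("would hit the network")

    def test_search_is_patched(monkeypatch):
        # Patch the method on the class; instances pick it up automatically.
        monkeypatch.setattr(FakeClient, "search", lambda self, *a, **kw: ["canned result"])
        assert FakeClient().search("query") == ["canned result"]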
tests.conftest/mock_acs_search_filter
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<0>:<del> class AsyncSearchResultsIterator:
<1>:<del>     def __init__(self):
<2>:<del>         self.num = 1
<3>:<del> 
<4>:<del>     def __aiter__(self):
<5>:<del>         return self
<6>:<del> 
<7>:<del>     async def __anext__(self):
<8>:<del>         raise StopAsyncIteration
<9>:<del> 
<10>:<del> async def mock_search(self, *args, **kwargs):
<11>:<del>     self.filter = kwargs.get("filter")
<12>:<del>     return AsyncSearchResultsIterator()
<13>:<del> 
# module: tests.conftest
@pytest.fixture
def mock_acs_search_filter(monkeypatch):
<0>    class AsyncSearchResultsIterator:
<1>        def __init__(self):
<2>            self.num = 1
<3>
<4>        def __aiter__(self):
<5>            return self
<6>
<7>        async def __anext__(self):
<8>            raise StopAsyncIteration
<9>
<10>    async def mock_search(self, *args, **kwargs):
<11>        self.filter = kwargs.get("filter")
<12>        return AsyncSearchResultsIterator()
<13>
<14>    monkeypatch.setattr(SearchClient, "search", mock_search)
===========changed ref 0===========
# module: tests.conftest - class MockAzureCredential(AsyncTokenCredential): - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 1===========
# module: tests.conftest + @pytest.fixture + def mock_get_secret(monkeypatch): + monkeypatch.setattr(SecretClient, "get_secret", MockKeyVaultSecretClient().get_secret) +
===========changed ref 2===========
# module: tests.conftest - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 3===========
# module: tests.conftest + def mock_search(self, *args, **kwargs): + self.filter = kwargs.get("filter") + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) +
===========changed ref 4===========
# module: tests.conftest + @pytest.fixture + def mock_compute_embeddings_call(monkeypatch): + def mock_post(*args, **kwargs): + if kwargs.get("url").endswith("computervision/retrieval:vectorizeText"): + return mock_computervision_response() + else: + raise Exception("Unexpected URL for mock call to ClientSession.post()") + + monkeypatch.setattr(aiohttp.ClientSession, "post", mock_post) +
===========changed ref 5===========
# module: tests.conftest @pytest.fixture def mock_acs_search(monkeypatch): - class Caption: - def __init__(self, text): - self.text = text - - class AsyncSearchResultsIterator: - def __init__(self): - self.num = 1 - - def __aiter__(self): - return self - - async def __anext__(self): - if self.num == 1: - self.num = 0 - return { - "sourcepage": "Benefit_Options-2.pdf", - "sourcefile": "Benefit_Options.pdf", - "content": "There is a whistleblower policy.", - "embeddings": [], - "category": None, - "id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2", - "@search.score": 0.03279569745063782, - "@search.reranker_score": 3.4577205181121826, - "@search.highlights": None, - "@search.captions": [Caption("Caption: A whistleblower policy.")], - } - else: - raise StopAsyncIteration - - async def mock_search(*args, **kwargs): - return AsyncSearchResultsIterator() - + monkeypatch.setattr(SearchClient, "search", mock_search) monkeypatch.setattr(SearchClient, "search", mock_search)
===========changed ref 6===========
+ # module: app.backend.approaches.chatreadretrievereadvision + +
===========changed ref 7===========
+ # module: app.backend.approaches.chatapproach + +
===========changed ref 8===========
+ # module: tests.test_chatvisionapproach + +
===========changed ref 9===========
+ # module: app.backend.approaches.retrievethenreadvision + +
===========changed ref 10===========
+ # module: tests + +
===========changed ref 11===========
+ # module: app.backend.core.imageshelper + +
===========changed ref 12===========
+ # module: tests.mocks + +
===========changed ref 13===========
# module: tests.test_content_file - -
===========changed ref 14===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass +
===========changed ref 15===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass +
===========changed ref 16===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass +
===========changed ref 17===========
+ # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass +
===========changed ref 18===========
+ # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self +
===========changed ref 19===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self +
===========changed ref 20===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self +
===========changed ref 21===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self +
===========changed ref 22===========
+ # module: tests.mocks + class MockResponse: + def text(self): + return self._text +
===========changed ref 23===========
+ # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() +
===========changed ref 24===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data +
===========changed ref 25===========
+ # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value +
===========changed ref 26===========
+ # module: tests.test_chatvisionapproach + @pytest.fixture + def openai_client(): + return MockOpenAIClient() +
===========changed ref 27===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def __init__(self): + self.embeddings = self +
===========changed ref 28===========
+ # module: tests.mocks + class MockResponse: + def json(self): + return json.loads(self.text) +
===========changed ref 29===========
+ # module: tests.mocks + class MockKeyVaultSecretClient: + def get_secret(self, secret_name): + return MockKeyVaultSecret("mysecret") +
===========changed ref 30===========
+ # module: tests.mocks + class MockAzureCredential(AsyncTokenCredential): + def get_token(self, uri): + return MockToken("", 9999999999, "") +
===========changed ref 31===========
# module: tests.test_content_file - class MockAzureCredential: - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 32===========
+ # module: tests.mocks + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status +
===========changed ref 33===========
# module: tests.test_content_file - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 34===========
+ # module: tests.mocks + MockToken = namedtuple("MockToken", ["token", "expires_on", "value"]) +
===========changed ref 35===========
# module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def __init__(self, credential: str, endpoint: str, verbose: bool = False): + self.credential = credential + self.endpoint = endpoint + self.verbose = verbose +
===========changed ref 36===========
# module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_client(self) -> AsyncOpenAI: - return AsyncOpenAI(api_key=self.credential, organization=self.organization) -
===========changed ref 37===========
# module: app.backend.approaches.approach + @dataclass + class ThoughtStep: + title: str + description: Optional[Any] + props: Optional[dict[str, Any]] = None +
===========changed ref 38===========
# module: scripts.prepdocslib.embeddings + class ImageEmbeddings: + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the Vision embeddings API, sleeping before retrying...") +
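This variant's mock_search also records the filter it was called with on the client instance, so a test can later assert on the security filter. The argument-capturing trick in isolation, with illustrative names:

    class FakeClient:
        pass

    def fake_search(self, *args, **kwargs):
        # Stash the keyword argument on the instance for later assertions.
        self.filter = kwargs.get("filter")
        return []

    FakeClient.search = fake_search
    client = FakeClient()
    client.search(search_text="*", filter="oids/any(g: g eq 'user-oid')")
    assert client.filter == "oids/any(g: g eq 'user-oid')"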
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
36015895f14c8569e3a3989fcf5744a9b5bd4845
Integrate GPT4-vision support (#1056)
<3>:<add> test_app.app.config.update({"TESTING": True})
<del> quart_app.config.update({"TESTING": True})
<s>ftest
@pytest_asyncio.fixture()
+ async def client(
+     monkeypatch,
+     mock_env,
+     mock_openai_chatcompletion,
+     mock_openai_embedding,
+     mock_acs_search,
+     mock_blob_container_client,
+     mock_compute_embeddings_call,
+ ):
- async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request):
<0>    quart_app = app.create_app()
<1>
<2>    async with quart_app.test_app() as test_app:
<3>        quart_app.config.update({"TESTING": True})
<4>        mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
<5>        mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
<6>        yield test_app.test_client()
===========changed ref 0===========
# module: tests.conftest + @pytest.fixture + def mock_blob_container_client(monkeypatch): + monkeypatch.setattr(ContainerClient, "get_blob_client", lambda *args, **kwargs: MockBlobClient()) +
===========changed ref 1===========
# module: tests.conftest @pytest.fixture def mock_acs_search_filter(monkeypatch): - class AsyncSearchResultsIterator: - def __init__(self): - self.num = 1 - - def __aiter__(self): - return self - - async def __anext__(self): - raise StopAsyncIteration - - async def mock_search(self, *args, **kwargs): - self.filter = kwargs.get("filter") - return AsyncSearchResultsIterator() - monkeypatch.setattr(SearchClient, "search", mock_search)
===========changed ref 2===========
# module: tests.conftest - class MockAzureCredential(AsyncTokenCredential): - def get_token(self, uri): - return MockToken("mock_token", 9999999999) -
===========changed ref 3===========
# module: tests.conftest + @pytest.fixture + def mock_get_secret(monkeypatch): + monkeypatch.setattr(SecretClient, "get_secret", MockKeyVaultSecretClient().get_secret) +
===========changed ref 4===========
# module: tests.conftest - MockToken = namedtuple("MockToken", ["token", "expires_on"]) -
===========changed ref 5===========
# module: tests.conftest + def mock_search(self, *args, **kwargs): + self.filter = kwargs.get("filter") + return MockAsyncSearchResultsIterator(kwargs.get("search_text"), kwargs.get("vector_queries")) +
===========changed ref 6===========
# module: tests.conftest + @pytest.fixture + def mock_compute_embeddings_call(monkeypatch): + def mock_post(*args, **kwargs): + if kwargs.get("url").endswith("computervision/retrieval:vectorizeText"): + return mock_computervision_response() + else: + raise Exception("Unexpected URL for mock call to ClientSession.post()") + + monkeypatch.setattr(aiohttp.ClientSession, "post", mock_post) +
===========changed ref 7===========
# module: tests.conftest envs = [ { "OPENAI_HOST": "openai", "OPENAI_API_KEY": "secretkey", "OPENAI_ORGANIZATION": "organization", }, { "OPENAI_HOST": "azure", "AZURE_OPENAI_SERVICE": "test-openai-service", "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + "AZURE_OPENAI_GPT4V_MODEL": "gpt-4", + "VISION_SECRET_NAME": "mysecret", + "VISION_ENDPOINT": "https://testvision.cognitiveservices.azure.com/", + "AZURE_KEY_VAULT_NAME": "mykeyvault", }, ] auth_envs = [ { "OPENAI_HOST": "azure", "AZURE_OPENAI_SERVICE": "test-openai-service", "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", "AZURE_USE_AUTHENTICATION": "true", "AZURE_SERVER_APP_ID": "SERVER_APP", "AZURE_SERVER_APP_SECRET": "SECRET", "AZURE_CLIENT_APP_ID": "CLIENT_APP", "AZURE_TENANT_ID": "TENANT_ID", }, ]
===========changed ref 8===========
# module: tests.conftest @pytest.fixture def mock_acs_search(monkeypatch): - class Caption: - def __init__(self, text): - self.text = text - - class AsyncSearchResultsIterator: - def __init__(self): - self.num = 1 - - def __aiter__(self): - return self - - async def __anext__(self): - if self.num == 1: - self.num = 0 - return { - "sourcepage": "Benefit_Options-2.pdf", - "sourcefile": "Benefit_Options.pdf", - "content": "There is a whistleblower policy.", - "embeddings": [], - "category": None, - "id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2", - "@search.score": 0.03279569745063782, - "@search.reranker_score": 3.4577205181121826, - "@search.highlights": None, - "@search.captions": [Caption("Caption: A whistleblower policy.")], - } - else: - raise StopAsyncIteration - - async def mock_search(*args, **kwargs): - return AsyncSearchResultsIterator() - + monkeypatch.setattr(SearchClient, "search", mock_search) monkeypatch.setattr(SearchClient, "search", mock_search)
===========changed ref 9===========
+ # module: app.backend.approaches.chatreadretrievereadvision + +
===========changed ref 10===========
+ # module: app.backend.approaches.chatapproach + +
===========changed ref 11===========
+ # module: tests.test_chatvisionapproach + +
===========changed ref 12===========
+ # module: app.backend.approaches.retrievethenreadvision + +
===========changed ref 13===========
+ # module: tests + +
===========changed ref 14===========
+ # module: app.backend.core.imageshelper + +
===========changed ref 15===========
+ # module: tests.mocks + +
===========changed ref 16===========
# module: tests.test_content_file - -
===========changed ref 17===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @abstractmethod + async def run_until_final_call(self, history, overrides, auth_claims, should_stream) -> tuple: + pass +
===========changed ref 18===========
+ # module: app.backend.approaches.chatapproach + class ChatApproach(Approach, ABC): + @property + @abstractmethod + def system_message_chat_conversation(self) -> str: + pass +
===========changed ref 19===========
+ # module: tests.test_chatvisionapproach + class MockOpenAIClient: + def create(self, *args, **kwargs): + pass +
===========changed ref 20===========
+ # module: tests.mocks + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass +
===========changed ref 21===========
+ # module: tests.mocks + class MockResponse: + def __aenter__(self): + return self +
===========changed ref 22===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def by_page(self): + return self +
===========changed ref 23===========
+ # module: tests.mocks + class MockAsyncSearchResultsIterator: + def __aiter__(self): + return self +
===========changed ref 24===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __aiter__(self): + return self +
===========changed ref 25===========
+ # module: tests.mocks + class MockResponse: + def text(self): + return self._text +
===========changed ref 26===========
+ # module: tests.mocks + class MockBlobClient: + def download_blob(self): + return MockBlob() +
===========changed ref 27===========
+ # module: tests.mocks + class MockAsyncPageIterator: + def __init__(self, data): + self.data = data +
===========changed ref 28===========
+ # module: tests.mocks + class MockKeyVaultSecret: + def __init__(self, value): + self.value = value +
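The client fixture above builds the real Quart app under test and patches its clients. A minimal sketch of the underlying Quart test-client pattern, assuming the quart and pytest-asyncio packages are installed:

    import pytest
    from quart import Quart

    @pytest.fixture
    def app():
        app = Quart(__name__)
        app.config.update({"TESTING": True})

        @app.route("/ping")
        async def ping():
            return {"ok": True}  # Quart serializes dict returns as JSON

        return app

    @pytest.mark.asyncio
    async def test_ping(app):
        client = app.test_client()
        response = await client.get("/ping")
        assert (await response.get_json()) == {"ok": True}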
tests.test_chatvisionapproach/chat_approach
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> auth_helper=AuthenticationHelper(
<add>     search_index=MockSearchIndex,
<add>     use_authentication=True,
<add>     server_app_id="SERVER_APP",
<add>     server_app_secret="SERVER_SECRET",
<add>     client_app_id="CLIENT_APP",
<add>     tenant_id="TENANT_ID",
<add>     require_access_control=None,
<add> ),
# module: tests.test_chatvisionapproach
@pytest.fixture
+ def chat_approach(openai_client, mock_confidential_client_success):
- def chat_approach(openai_client):
<0>    return ChatReadRetrieveReadVisionApproach(
<1>        search_client=None,
<2>        openai_client=openai_client,
<3>        blob_container_client=None,
<4>        vision_endpoint="endpoint",
<5>        vision_key="key",
<6>        gpt4v_deployment="gpt-4v",
<7>        gpt4v_model="gpt-4v",
<8>        embedding_deployment="embeddings",
<9>        embedding_model="text-",
<10>        sourcepage_field="",
<11>        content_field="",
<12>        query_language="en-us",
<13>        query_speller="lexicon",
<14>    )
===========unchanged ref 0===========
at: _pytest.fixtures
    fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction
    fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker
at: tests.conftest
    mock_confidential_client_success(monkeypatch)
at: tests.test_chatvisionapproach
    MockOpenAIClient()
===========changed ref 0===========
+ # module: scripts.auth_common + +
===========changed ref 1===========
+ # module: scripts.auth_common + TIMEOUT = 60 +
===========changed ref 2===========
+ # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} +
===========changed ref 3===========
+ # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None +
===========changed ref 4===========
+ # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True +
===========changed ref 5===========
+ # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True +
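test_authentication_enabled in the refs above gates setup on string env vars compared case-insensitively against "true". The parsing idiom on its own, as a runnable snippet:

    import os

    def env_flag(name: str) -> bool:
        # Unset or any value other than "true"/"True"/... reads as False.
        return os.getenv(name, "").lower() == "true"

    os.environ["AZURE_USE_AUTHENTICATION"] = "True"
    print(env_flag("AZURE_USE_AUTHENTICATION"))      # True
    print(env_flag("AZURE_ENFORCE_ACCESS_CONTROL"))  # False (unset)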
app.backend.core.authentication/AuthenticationHelper.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<8>:<del> self.token_cache_path = token_cache_path
<9>:<del> if not self.token_cache_path:
<10>:<del>     self.temporary_directory = TemporaryDirectory()
<11>:<del>     self.token_cache_path = os.path.join(self.temporary_directory.name, "token_cache.bin")
<12>:<del> try:
<13>:<del>     persistence = build_encrypted_persistence(location=self.token_cache_path)
<14>:<del> except Exception:
<15>:<del>     logging.exception("Encryption unavailable. Opting in to plain text.")
<16>:<del>     persistence = FilePersistence(location=self.token_cache_path)
<17>:<add> field_names = [field.name for field in search_index.fields] if search_index else []
<add> self.has_auth_fields = "oids" in field_names and "groups" in field_names
<add> self.require_access_control = require_access_control
<18>:<del> server_app_id,
<19>:<del> authority=self.authority,
<20>:<del> client_credential=server_app_secret,
<21>:<del> token_cache=PersistedTokenCache(persistence),
<22>:<add> server_app_id, authority=self.authority, client_credential=server_app_secret, token_cache=TokenCache()
<23>:<add> else:
<add>     self.has_auth_fields = False
<add>     self.require_access_control = False
# module: app.backend.core.authentication
class AuthenticationHelper:
    def __init__(
        self,
+       search_index: Optional[SearchIndex],
        use_authentication: bool,
        server_app_id: Optional[str],
        server_app_secret: Optional[str],
        client_app_id: Optional[str],
        tenant_id: Optional[str],
+       require_access_control: bool = False,
-       token_cache_path: Optional[str] = None,
    ):
<0>    self.use_authentication = use_authentication
<1>    self.server_app_id = server_app_id
<2>    self.server_app_secret = server_app_secret
<3>    self.client_app_id = client_app_id
<4>    self.tenant_id = tenant_id
<5>    self.authority = f"https://login.microsoftonline.com/{tenant_id}"
<6>
<7>    if self.use_authentication:
<8>        self.token_cache_path = token_cache_path
<9>        if not self.token_cache_path:
<10>            self.temporary_directory = TemporaryDirectory()
<11>            self.token_cache_path = os.path.join(self.temporary_directory.name, "token_cache.bin")
<12>        try:
<13>            persistence = build_encrypted_persistence(location=self.token_cache_path)
<14>        except Exception:
<15>            logging.exception("Encryption unavailable. Opting in to plain text.")
<16>            persistence = FilePersistence(location=self.token_cache_path)
<17>        self.confidential_client = ConfidentialClientApplication(
<18>            server_app_id,
<19>            authority=self.authority,
<20>            client_credential=server_app_secret,
<21>            token_cache=PersistedTokenCache(persistence),
<22>        )
===========changed ref 0===========
# module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" +
===========changed ref 1===========
+ # module: scripts.auth_common + +
===========changed ref 2===========
+ # module: scripts.auth_common + TIMEOUT = 60 +
===========changed ref 3===========
+ # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} +
===========changed ref 4===========
# module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + )
===========changed ref 5===========
+ # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None +
===========changed ref 6===========
+ # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True +
===========changed ref 7===========
+ # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True +
===========changed ref 8===========
# module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", )
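The new __init__ derives has_auth_fields by scanning the index schema for both "oids" and "groups" fields. A sketch of that check with stand-in field types (the real code uses the Azure Search SDK's SearchIndex):

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class SimpleField:
        name: str

    @dataclass
    class SimpleIndex:
        fields: List[SimpleField]

    def has_auth_fields(index: Optional[SimpleIndex]) -> bool:
        # Mirrors the logic in the diff: no index means no auth fields.
        names = [f.name for f in index.fields] if index else []
        return "oids" in names and "groups" in names

    print(has_auth_fields(SimpleIndex([SimpleField("oids"), SimpleField("groups")])))  # True
    print(has_auth_fields(None))  # False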
app.backend.core.authentication/AuthenticationHelper.get_auth_setup_for_client
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> "requireAccessControl": self.require_access_control, # Whether or not access control is required to use the application
# module: app.backend.core.authentication
class AuthenticationHelper:
    def get_auth_setup_for_client(self) -> dict[str, Any]:
<0>    # returns MSAL.js settings used by the client app
<1>    return {
<2>        "useLogin": self.use_authentication,  # Whether or not login elements are enabled on the UI
<3>        "msalConfig": {
<4>            "auth": {
<5>                "clientId": self.client_app_id,  # Client app id used for login
<6>                "authority": self.authority,  # Directory to use for login https://learn.microsoft.com/azure/active-directory/develop/msal-client-application-configuration#authority
<7>                "redirectUri": "/redirect",  # Points to window.location.origin. You must register this URI on Azure Portal/App Registration.
<8>                "postLogoutRedirectUri": "/",  # Indicates the page to navigate after logout.
<9>                "navigateToLoginRequestUrl": False,  # If "true", will navigate back to the original request location before processing the auth code response.
<10>            },
<11>            "cache": {
<12>                "cacheLocation": "sessionStorage",
<13>                "storeAuthStateInCookie": False,
<14>            },  # Configures cache location. "sessionStorage" is more secure, but "localStorage" gives you SSO between tabs.  # Set this to "true" if you are having issues on IE11 or Edge
<15>        },
<16>        "loginRequest": {
<17>            # Scopes you add here will be prompted for user consent during sign-in.
<18>            # By default, MSAL.js will add OIDC scopes (openid, profile, email) to any login request.
<19>            # For more information about OIDC scopes, visit:
<20>            # https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent#openid-connect-scopes
<21>            "scopes": [".default"],
<22>            # Uncomment the following line to cause a consent dialog to appear on every login
<23>            # For more information, please visit https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-auth-code-</s>
===========below chunk 0===========
# module: app.backend.core.authentication
class AuthenticationHelper:
    def get_auth_setup_for_client(self) -> dict[str, Any]:
# offset: 1
            # "prompt": "consent"
        },
        "tokenRequest": {
            "scopes": [f"api://{self.server_app_id}/access_as_user"],
        },
    }
===========changed ref 0===========
# module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" +
===========changed ref 1===========
# module: app.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): self.use_authentication = use_authentication self.server_app_id = server_app_id self.server_app_secret = server_app_secret self.client_app_id = client_app_id self.tenant_id = tenant_id self.authority = f"https://login.microsoftonline.com/{tenant_id}" if self.use_authentication: - self.token_cache_path = token_cache_path - if not self.token_cache_path: - self.temporary_directory = TemporaryDirectory() - self.token_cache_path = os.path.join(self.temporary_directory.name, "token_cache.bin") - try: - persistence = build_encrypted_persistence(location=self.token_cache_path) - except Exception: - logging.exception("Encryption unavailable. Opting in to plain text.") - persistence = FilePersistence(location=self.token_cache_path) + field_names = [field.name for field in search_index.fields] if search_index else [] + self.has_auth_fields = "oids" in field_names and "groups" in field_names + self.require_access_control = require_access_control self.confidential_client = ConfidentialClientApplication( - server_app_id, - authority=self.authority, - client_credential=server_app_secret, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority,</s>
===========changed ref 2===========
<s>.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): # offset: 1 <s>, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority, client_credential=server_app_secret, token_cache=TokenCache() ) + else: + self.has_auth_fields = False + self.require_access_control = False
===========changed ref 3===========
+ # module: scripts.auth_common + +
===========changed ref 4===========
+ # module: scripts.auth_common + TIMEOUT = 60 +
===========changed ref 5===========
+ # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} +
===========changed ref 6===========
# module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + )
===========changed ref 7===========
+ # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None +
===========changed ref 8===========
+ # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True +
===========changed ref 9===========
+ # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True +
===========changed ref 10===========
# module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", )
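For reference, a condensed sketch of the payload shape get_auth_setup_for_client returns to the browser; values below are placeholders, and only a subset of the real keys is shown:

    import json

    def auth_setup(use_login: bool, require_access_control: bool, client_app_id: str, tenant_id: str):
        return {
            "useLogin": use_login,
            "requireAccessControl": require_access_control,
            "msalConfig": {
                "auth": {
                    "clientId": client_app_id,
                    "authority": f"https://login.microsoftonline.com/{tenant_id}",
                    "redirectUri": "/redirect",
                }
            },
        }

    print(json.dumps(auth_setup(True, False, "CLIENT_APP", "TENANT_ID"), indent=2))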
app.backend.core.authentication/AuthenticationHelper.get_token_auth_header
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<2>:<add> if auth:
<del> if not auth:
<3>:<del> raise AuthError(
<4>:<del>     {"code": "authorization_header_missing", "description": "Authorization header is expected"}, 401
<5>:<del> )
<6>:<add> parts = auth.split()
<7>:<add> if parts[0].lower() != "bearer":
<add>     raise AuthError(error="Authorization header must start with Bearer", status_code=401)
<add> elif len(parts) == 1:
<add>     raise AuthError(error="Token not found", status_code=401)
<add> elif len(parts) > 2:
<add>     raise AuthError(error="Authorization header must be Bearer token", status_code=401)
<del> parts = auth.split()
<9>:<del> if parts[0].lower() != "bearer":
<10>:<del> raise AuthError(
<11>:<del>     {"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401
<12>:<del> )
<13>:<del> elif len(parts) == 1:
<14>:<del> raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401)
<15>:<del> elif len(parts) > 2:
<16>:<del> raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401)
<17>:<add> token = parts[1]
<add> return token
<18>:<add> # App services built-in authentication passes the access token directly as a header
<add> # To learn more, please visit https://learn.microsoft.com/azure/app-service/configure-authentication-oauth-tokens
<add> token = headers.get("x-ms-token-aad-access-token", None)
<add> if token:
<del> token = parts[1]
<19>:<add> return token
<del> return token
# module: app.backend.core.authentication class AuthenticationHelper: @staticmethod def get_token_auth_header(headers: dict) -> str: <0> # Obtains the Access Token from the Authorization Header <1> auth = headers.get("Authorization", None) <2> if not auth: <3> raise AuthError( <4> {"code": "authorization_header_missing", "description": "Authorization header is expected"}, 401 <5> ) <6> <7> parts = auth.split() <8> <9> if parts[0].lower() != "bearer": <10> raise AuthError( <11> {"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401 <12> ) <13> elif len(parts) == 1: <14> raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401) <15> elif len(parts) > 2: <16> raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401) <17> <18> token = parts[1] <19> return token <20>
===========unchanged ref 0=========== at: app.backend.core.authentication AuthError(error, status_code) ===========changed ref 0=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 1=========== # module: app.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): self.use_authentication = use_authentication self.server_app_id = server_app_id self.server_app_secret = server_app_secret self.client_app_id = client_app_id self.tenant_id = tenant_id self.authority = f"https://login.microsoftonline.com/{tenant_id}" if self.use_authentication: - self.token_cache_path = token_cache_path - if not self.token_cache_path: - self.temporary_directory = TemporaryDirectory() - self.token_cache_path = os.path.join(self.temporary_directory.name, "token_cache.bin") - try: - persistence = build_encrypted_persistence(location=self.token_cache_path) - except Exception: - logging.exception("Encryption unavailable. Opting in to plain text.") - persistence = FilePersistence(location=self.token_cache_path) + field_names = [field.name for field in search_index.fields] if search_index else [] + self.has_auth_fields = "oids" in field_names and "groups" in field_names + self.require_access_control = require_access_control self.confidential_client = ConfidentialClientApplication( - server_app_id, - authority=self.authority, - client_credential=server_app_secret, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority,</s> ===========changed ref 2=========== <s>.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): # offset: 1 <s>, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority, client_credential=server_app_secret, token_cache=TokenCache() ) + else: + self.has_auth_fields = False + self.require_access_control = False ===========changed ref 3=========== # module: app.backend.core.authentication class AuthenticationHelper: def get_auth_setup_for_client(self) -> dict[str, Any]: # returns MSAL.js settings used by the client app return { "useLogin": self.use_authentication, # Whether or not login elements are enabled on the UI + "requireAccessControl": self.require_access_control, # Whether or not access control is required to use the application "msalConfig": { "auth": { "clientId": self.client_app_id, # Client app id used for login "authority": self.authority, # Directory to use for login https://learn.microsoft.com/azure/active-directory/develop/msal-client-application-configuration#authority "redirectUri": "/redirect", # Points to window.location.origin. You must register this URI on Azure Portal/App Registration. "postLogoutRedirectUri": "/", # Indicates the page to navigate after logout. 
"navigateToLoginRequestUrl": False, # If "true", will navigate back to the original request location before processing the auth code response. }, "cache": { "cacheLocation": "sessionStorage", "storeAuthStateInCookie": False, }, # Configures cache location. "sessionStorage" is more secure, but "localStorage" gives you SSO between tabs. # Set this to "true" if you are having issues on IE11 or Edge }, "loginRequest": { # Scopes you add here will be prompted for user consent during sign-in. # By default, MSAL.js will add OIDC scopes (openid, profile, email) to any login request. # For more information about OIDC scopes, visit: # https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent#openid-connect-scopes "scopes": [".default"], # Uncomment the following line to cause a consent dialog to appear on every login # For more information, please visit https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-</s> ===========changed ref 4=========== # module: app.backend.core.authentication class AuthenticationHelper: def get_auth_setup_for_client(self) -> dict[str, Any]: # offset: 1 <s> # For more information, please visit https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-auth-code-flow#request-an-authorization-code # "prompt": "consent" }, "tokenRequest": { "scopes": [f"api://{self.server_app_id}/access_as_user"], }, } ===========changed ref 5=========== + # module: scripts.auth_common + + ===========changed ref 6=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 7=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 8=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True +
app.backend.core.authentication/AuthenticationHelper.build_security_filters
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> use_oid_security_filter = self.require_access_control or overrides.get("use_oid_security_filter") <del> use_oid_security_filter = overrides.get("use_oid_security_filter")
<4>:<add> use_groups_security_filter = self.require_access_control or overrides.get("use_groups_security_filter") <del> use_groups_security_filter = overrides.get("use_groups_security_filter")
<5>:<add> 
<add> if (use_oid_security_filter or use_groups_security_filter) and not self.has_auth_fields:
<add> raise AuthError(
<add> error="oids and groups must be defined in the search index to use authentication", status_code=400
<add> )
# module: app.backend.core.authentication class AuthenticationHelper: - @staticmethod + def build_security_filters(self, overrides: dict[str, Any], auth_claims: dict[str, Any]): - def build_security_filters(overrides: dict[str, Any], auth_claims: dict[str, Any]): <0> # Build different permutations of the oid or groups security filter using OData filters <1> # https://learn.microsoft.com/azure/search/search-security-trimming-for-azure-search <2> # https://learn.microsoft.com/azure/search/search-query-odata-filter <3> use_oid_security_filter = overrides.get("use_oid_security_filter") <4> use_groups_security_filter = overrides.get("use_groups_security_filter") <5> <6> oid_security_filter = ( <7> "oids/any(g:search.in(g, '{}'))".format(auth_claims.get("oid") or "") if use_oid_security_filter else None <8> ) <9> groups_security_filter = ( <10> "groups/any(g:search.in(g, '{}'))".format(", ".join(auth_claims.get("groups") or [])) <11> if use_groups_security_filter <12> else None <13> ) <14> <15> # If only one security filter is specified, return that filter <16> # If both security filters are specified, combine them with "or" so only 1 security filter needs to pass <17> # If no security filters are specified, don't return any filter <18> if oid_security_filter and not groups_security_filter: <19> return oid_security_filter <20> elif groups_security_filter and not oid_security_filter: <21> return groups_security_filter <22> elif oid_security_filter and groups_security_filter: <23> return f"({oid_security_filter} or {groups_security_filter})" <24> else: <25> return None <26>
===========unchanged ref 0=========== at: app.backend.core.authentication AuthError(error, status_code) ===========changed ref 0=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 1=========== # module: app.backend.core.authentication class AuthenticationHelper: @staticmethod def get_token_auth_header(headers: dict) -> str: # Obtains the Access Token from the Authorization Header auth = headers.get("Authorization", None) + if auth: - if not auth: - raise AuthError( - {"code": "authorization_header_missing", "description": "Authorization header is expected"}, 401 - ) + parts = auth.split() + if parts[0].lower() != "bearer": + raise AuthError(error="Authorization header must start with Bearer", status_code=401) + elif len(parts) == 1: + raise AuthError(error="Token not found", status_code=401) + elif len(parts) > 2: + raise AuthError(error="Authorization header must be Bearer token", status_code=401) - parts = auth.split() - if parts[0].lower() != "bearer": - raise AuthError( - {"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401 - ) - elif len(parts) == 1: - raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401) - elif len(parts) > 2: - raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401) + token = parts[1] + return token + # App services built-in authentication passes the access token directly as a header + # To learn more, please visit https://learn.microsoft.com/azure/app-service/configure-authentication-oauth-tokens + token = headers.get("x-ms-token-aad-access-token", None) + if token: - token = parts[1] + return token - return token ===========changed ref 2=========== # module: app.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): self.use_authentication = use_authentication self.server_app_id = server_app_id self.server_app_secret = server_app_secret self.client_app_id = client_app_id self.tenant_id = tenant_id self.authority = f"https://login.microsoftonline.com/{tenant_id}" if self.use_authentication: - self.token_cache_path = token_cache_path - if not self.token_cache_path: - self.temporary_directory = TemporaryDirectory() - self.token_cache_path = os.path.join(self.temporary_directory.name, "token_cache.bin") - try: - persistence = build_encrypted_persistence(location=self.token_cache_path) - except Exception: - logging.exception("Encryption unavailable. 
Opting in to plain text.") - persistence = FilePersistence(location=self.token_cache_path) + field_names = [field.name for field in search_index.fields] if search_index else [] + self.has_auth_fields = "oids" in field_names and "groups" in field_names + self.require_access_control = require_access_control self.confidential_client = ConfidentialClientApplication( - server_app_id, - authority=self.authority, - client_credential=server_app_secret, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority,</s> ===========changed ref 3=========== <s>.backend.core.authentication class AuthenticationHelper: def __init__( self, + search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], + require_access_control: bool = False, - token_cache_path: Optional[str] = None, ): # offset: 1 <s>, - token_cache=PersistedTokenCache(persistence), + server_app_id, authority=self.authority, client_credential=server_app_secret, token_cache=TokenCache() ) + else: + self.has_auth_fields = False + self.require_access_control = False ===========changed ref 4=========== # module: app.backend.core.authentication class AuthenticationHelper: def get_auth_setup_for_client(self) -> dict[str, Any]: # returns MSAL.js settings used by the client app return { "useLogin": self.use_authentication, # Whether or not login elements are enabled on the UI + "requireAccessControl": self.require_access_control, # Whether or not access control is required to use the application "msalConfig": { "auth": { "clientId": self.client_app_id, # Client app id used for login "authority": self.authority, # Directory to use for login https://learn.microsoft.com/azure/active-directory/develop/msal-client-application-configuration#authority "redirectUri": "/redirect", # Points to window.location.origin. You must register this URI on Azure Portal/App Registration. "postLogoutRedirectUri": "/", # Indicates the page to navigate after logout. "navigateToLoginRequestUrl": False, # If "true", will navigate back to the original request location before processing the auth code response. }, "cache": { "cacheLocation": "sessionStorage", "storeAuthStateInCookie": False, }, # Configures cache location. "sessionStorage" is more secure, but "localStorage" gives you SSO between tabs. # Set this to "true" if you are having issues on IE11 or Edge }, "loginRequest": { # Scopes you add here will be prompted for user consent during sign-in. # By default, MSAL.js will add OIDC scopes (openid, profile, email) to any login request. 
# For more information about OIDC scopes, visit: # https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent#openid-connect-scopes "scopes": [".default"], # Uncomment the following line to cause a consent dialog to appear on every login # For more information, please visit https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-</s> ===========changed ref 5=========== # module: app.backend.core.authentication class AuthenticationHelper: def get_auth_setup_for_client(self) -> dict[str, Any]: # offset: 1 <s> # For more information, please visit https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-auth-code-flow#request-an-authorization-code # "prompt": "consent" }, "tokenRequest": { "scopes": [f"api://{self.server_app_id}/access_as_user"], }, } ===========changed ref 6=========== + # module: scripts.auth_common + + ===========changed ref 7=========== + # module: scripts.auth_common + TIMEOUT = 60 +
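===========usage sketch===========
To make the filter permutations concrete, a sketch of the OData strings the updated method produces. `helper` is assumed to be an AuthenticationHelper whose index defines the oids and groups fields, built with require_access_control=False; the claim values are hypothetical.

auth_claims = {"oid": "OID-123", "groups": ["GROUP-A", "GROUP-B"]}

helper.build_security_filters({"use_oid_security_filter": True}, auth_claims)
# -> "oids/any(g:search.in(g, 'OID-123'))"

helper.build_security_filters({"use_groups_security_filter": True}, auth_claims)
# -> "groups/any(g:search.in(g, 'GROUP-A, GROUP-B'))"

helper.build_security_filters(
    {"use_oid_security_filter": True, "use_groups_security_filter": True}, auth_claims
)
# -> "(oids/any(g:search.in(g, 'OID-123')) or groups/any(g:search.in(g, 'GROUP-A, GROUP-B')))"

helper.build_security_filters({}, auth_claims)
# -> None; with require_access_control=True both filters would be forced on instead.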
app.backend.core.authentication/AuthenticationHelper.get_auth_claims_if_enabled
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
# module: app.backend.core.authentication class AuthenticationHelper: def get_auth_claims_if_enabled(self, headers: dict) -> dict[str, Any]: <0> if not self.use_authentication: <1> return {} <2> try: <3> # Read the authentication token from the authorization header and exchange it using the On Behalf Of Flow <4> # The scope is set to the Microsoft Graph API, which may need to be called for more authorization information <5> # https://learn.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow <6> auth_token = AuthenticationHelper.get_token_auth_header(headers) <7> graph_resource_access_token = self.confidential_client.acquire_token_on_behalf_of( <8> user_assertion=auth_token, scopes=["https://graph.microsoft.com/.default"] <9> ) <10> if "error" in graph_resource_access_token: <11> raise AuthError(error=str(graph_resource_access_token), status_code=401) <12> <13> # Read the claims from the response. The oid and groups claims are used for security filtering <14> # https://learn.microsoft.com/azure/active-directory/develop/id-token-claims-reference <15> id_token_claims = graph_resource_access_token["id_token_claims"] <16> auth_claims = {"oid": id_token_claims["oid"], "groups": id_token_claims.get("groups") or []} <17> <18> # A groups claim may have been omitted either because it was not added in the application manifest for the API application, <19> # or a groups overage claim may have been emitted. <20> # https://learn.microsoft.com/azure/active-directory/develop/id-token-claims-reference#groups-overage-claim <21> missing_groups_claim = "groups" not in id_token_claims <22> has_group_overage_claim = ( <23> missing_groups_claim <24> and "_claim_names" in id_token_claims <25> </s>
===========below chunk 0=========== # module: app.backend.core.authentication class AuthenticationHelper: def get_auth_claims_if_enabled(self, headers: dict) -> dict[str, Any]: # offset: 1 ) if missing_groups_claim or has_group_overage_claim: # Read the user's groups from Microsoft Graph auth_claims["groups"] = await AuthenticationHelper.list_groups(graph_resource_access_token) return auth_claims except AuthError as e: print(e.error) logging.exception("Exception getting authorization information - " + json.dumps(e.error)) return {} except Exception: logging.exception("Exception getting authorization information") return {} ===========unchanged ref 0=========== at: app.backend.core.authentication AuthError(error, status_code) AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) at: app.backend.core.authentication.AuthenticationHelper get_token_auth_header(headers: dict) -> str list_groups(graph_resource_access_token: dict) -> list[str] at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.core.authentication class AuthenticationHelper: @staticmethod def get_token_auth_header(headers: dict) -> str: # Obtains the Access Token from the Authorization Header auth = headers.get("Authorization", None) + if auth: - if not auth: - raise AuthError( - {"code": "authorization_header_missing", "description": "Authorization header is expected"}, 401 - ) + parts = auth.split() + if parts[0].lower() != "bearer": + raise AuthError(error="Authorization header must start with Bearer", status_code=401) + elif len(parts) == 1: + raise AuthError(error="Token not found", status_code=401) + elif len(parts) > 2: + raise AuthError(error="Authorization header must be Bearer token", status_code=401) - parts = auth.split() - if parts[0].lower() != "bearer": - raise AuthError( - {"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401 - ) - elif len(parts) == 1: - raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401) - elif len(parts) > 2: - raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401) + token = parts[1] + return token + # App services built-in authentication passes the access token directly as a header + # To learn more, please visit https://learn.microsoft.com/azure/app-service/configure-authentication-oauth-tokens + token = headers.get("x-ms-token-aad-access-token", None) + if token: - token = parts[1] + return token - return token ===========changed ref 1=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 2=========== # module: app.backend.core.authentication class AuthenticationHelper: - @staticmethod + def 
build_security_filters(self, overrides: dict[str, Any], auth_claims: dict[str, Any]): - def build_security_filters(overrides: dict[str, Any], auth_claims: dict[str, Any]): # Build different permutations of the oid or groups security filter using OData filters # https://learn.microsoft.com/azure/search/search-security-trimming-for-azure-search # https://learn.microsoft.com/azure/search/search-query-odata-filter + use_oid_security_filter = self.require_access_control or overrides.get("use_oid_security_filter") - use_oid_security_filter = overrides.get("use_oid_security_filter") + use_groups_security_filter = self.require_access_control or overrides.get("use_groups_security_filter") - use_groups_security_filter = overrides.get("use_groups_security_filter") + + if (use_oid_security_filter or use_groups_security_filter) and not self.has_auth_fields: + raise AuthError( + error="oids and groups must be defined in the search index to use authentication", status_code=400 + ) oid_security_filter = ( "oids/any(g:search.in(g, '{}'))".format(auth_claims.get("oid") or "") if use_oid_security_filter else None ) groups_security_filter = ( "groups/any(g:search.in(g, '{}'))".format(", ".join(auth_claims.get("groups") or [])) if use_groups_security_filter else None ) # If only one security filter is specified, return that filter # If both security filters are specified, combine them with "or" so only 1 security filter needs to pass # If no security filters are specified, don't return any filter if oid_security_filter and not groups_security_filter: return oid</s>
===========changed ref 3===========
# module: app.backend.core.authentication class AuthenticationHelper: - @staticmethod + def build_security_filters(self, overrides: dict[str, Any], auth_claims: dict[str, Any]): - def build_security_filters(overrides: dict[str, Any], auth_claims: dict[str, Any]): # offset: 1 <s> security filters are specified, don't return any filter if oid_security_filter and not groups_security_filter: return oid_security_filter elif groups_security_filter and not oid_security_filter: return groups_security_filter elif oid_security_filter and groups_security_filter: return f"({oid_security_filter} or {groups_security_filter})" else: return None
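===========usage sketch===========
A sketch of how a request handler consumes this method; `request` is assumed to be the incoming Quart request and `auth_helper` a configured AuthenticationHelper.

auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers)
# On success: {"oid": "<user object id>", "groups": ["<group id>", ...]}
# On any failure the recorded implementation logs and returns {} rather than
# raising, so security filters built from the empty claims match no documents.
oid = auth_claims.get("oid")
groups = auth_claims.get("groups", [])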
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> self.auth_helper = auth_helper
<s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): <0> self.search_client = search_client <1> self.blob_container_client = blob_container_client <2> self.openai_client = openai_client <3> self.embedding_model = embedding_model <4> self.embedding_deployment = embedding_deployment <5> self.sourcepage_field = sourcepage_field <6> self.content_field = content_field <7> self.gpt4v_deployment = gpt4v_deployment <8> self.gpt4v_model = gpt4v_model <9> self.query_language = query_language <10> self.query_speller = query_speller <11> self.vision_endpoint = vision_endpoint <12> self.vision_key = vision_key <13>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 7=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 8=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. 
Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 9=========== # module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 10=========== + # module: scripts.auth_update + def main(): + if not test_authentication_enabled(): + print("Not updating authentication.") + exit(0) + + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + auth_headers = await get_auth_headers(credential) + + uri = os.getenv("BACKEND_URI") + client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) + if client_app_id: + client_object_id = await get_application(auth_headers, client_app_id) + if client_object_id: + print(f"Updating redirect URIs for client app ID {client_app_id}...") + # Redirect URIs need to be relative to the deployed application + payload = { + "publicClient": {"redirectUris": []}, + "spa": { + "redirectUris": [ + "http://localhost:50505/redirect", + f"{uri}/redirect", + ] + }, + "web": { + "redirectUris": [ + f"{uri}/.auth/login/aad/callback", + ] + }, + } + await update_application(auth_headers, client_object_id, payload) + print(f"Application update for client app id {client_app_id} complete.") +
tests.test_chatapproach/chat_approach
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<2>:<add> auth_helper=None,
# module: tests.test_chatapproach @pytest.fixture def chat_approach(): <0> return ChatReadRetrieveReadApproach( <1> search_client=None, <2> openai_client=None, <3> chatgpt_model="gpt-35-turbo", <4> chatgpt_deployment="chat", <5> embedding_deployment="embeddings", <6> embedding_model="text-", <7> sourcepage_field="", <8> content_field="", <9> query_language="en-us", <10> query_speller="lexicon", <11> ) <12>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 7=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 8=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. 
Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 9=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key ===========changed ref 10=========== # module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 11=========== + # module: scripts.auth_update + def main(): + if not test_authentication_enabled(): + print("Not updating authentication.") + exit(0) + + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + auth_headers = await get_auth_headers(credential) + + uri = os.getenv("BACKEND_URI") + client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) + if client_app_id: + client_object_id = await get_application(auth_headers, client_app_id) + if client_object_id: + print(f"Updating redirect URIs for client app ID {client_app_id}...") + # Redirect URIs need to be relative to the deployed application + payload = { + "publicClient": {"redirectUris": []}, + "spa": { + "redirectUris": [ + "http://localhost:50505/redirect", + f"{uri}/redirect", + ] + }, + "web": { + "redirectUris": [ + f"{uri}/.auth/login/aad/callback", + ] + }, + } + await update_application(auth_headers, client_object_id, payload) + print(f"Application update for client app id {client_app_id} complete.") +
app.backend.approaches.approach/Approach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<2>:<add> self.auth_helper = auth_helper
<s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): <0> self.search_client = search_client <1> self.openai_client = openai_client <2> self.query_language = query_language <3> self.query_speller = query_speller <4> self.embedding_deployment = embedding_deployment <5> self.embedding_model = embedding_model <6> self.openai_host = openai_host <7>
===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 7=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 8=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 9=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. 
Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 10=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key ===========changed ref 11=========== # module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_update + def main(): + if not test_authentication_enabled(): + print("Not updating authentication.") + exit(0) + + credential = AzureDeveloperCliCredential(tenant_id=os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))) + auth_headers = await get_auth_headers(credential) + + uri = os.getenv("BACKEND_URI") + client_app_id = os.getenv("AZURE_CLIENT_APP_ID", None) + if client_app_id: + client_object_id = await get_application(auth_headers, client_app_id) + if client_object_id: + print(f"Updating redirect URIs for client app ID {client_app_id}...") + # Redirect URIs need to be relative to the deployed application + payload = { + "publicClient": {"redirectUris": []}, + "spa": { + "redirectUris": [ + "http://localhost:50505/redirect", + f"{uri}/redirect", + ] + }, + "web": { + "redirectUris": [ + f"{uri}/.auth/login/aad/callback", + ] + }, + } + await update_application(auth_headers, client_object_id, payload) + print(f"Application update for client app id {client_app_id} complete.") +
app.backend.approaches.approach/Approach.build_filter
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<1>:<add> security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) <del> security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims)
# module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: <0> exclude_category = overrides.get("exclude_category") or None <1> security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) <2> filters = [] <3> if exclude_category: <4> filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) <5> if security_filter: <6> filters.append(security_filter) <7> return None if len(filters) == 0 else " and ".join(filters) <8>
===========unchanged ref 0=========== at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) at: core.authentication.AuthenticationHelper scope: str = "https://graph.microsoft.com/.default" build_security_filters(overrides: dict[str, Any], auth_claims: dict[str, Any]) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 1=========== + # module: scripts.auth_common + + ===========changed ref 2=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 3=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 4=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 5=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 8=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 9=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + 
auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 10=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 11=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key ===========changed ref 12=========== # module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", )
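===========usage sketch===========
A sketch of build_filter's combined output, assuming `approach.auth_helper` was built with require_access_control=True (so both security filters are enforced) and hypothetical claims.

approach.build_filter({"exclude_category": "secret"}, {"oid": "OID-123", "groups": ["GROUP-A"]})
# -> "category ne 'secret' and (oids/any(g:search.in(g, 'OID-123')) or groups/any(g:search.in(g, 'GROUP-A')))"

approach.build_filter({}, {"oid": "OID-123", "groups": ["GROUP-A"]})
# -> "(oids/any(g:search.in(g, 'OID-123')) or groups/any(g:search.in(g, 'GROUP-A')))"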
tests.test_authenticationhelper/create_authentication_helper
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<1>:<add> search_index=MockSearchIndex, <6>:<add> require_access_control=require_access_control, <del> token_cache_path=None,
# module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): <0> return AuthenticationHelper( <1> use_authentication=True, <2> server_app_id="SERVER_APP", <3> server_app_secret="SERVER_SECRET", <4> client_app_id="CLIENT_APP", <5> tenant_id="TENANT_ID", <6> token_cache_path=None, <7> ) <8>
===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 7=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 8=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 9=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 10=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not 
use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 11=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 12=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key ===========changed ref 13=========== # module: tests.test_chatvisionapproach @pytest.fixture + def chat_approach(openai_client, mock_confidential_client_success): - def chat_approach(openai_client): return ChatReadRetrieveReadVisionApproach( search_client=None, openai_client=openai_client, + auth_helper=AuthenticationHelper( + search_index=MockSearchIndex, + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + require_access_control=None, + ), blob_container_client=None, vision_endpoint="endpoint", vision_key="key", gpt4v_deployment="gpt-4v", gpt4v_model="gpt-4v", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", )
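===========usage sketch===========
A sketch of what the new require_access_control flag changes in the test helper; claim values are hypothetical.

helper = create_authentication_helper(require_access_control=True)
# Both filters are now enforced even with empty overrides:
helper.build_security_filters({}, {"oid": "OID-123", "groups": []})
# -> "(oids/any(g:search.in(g, 'OID-123')) or groups/any(g:search.in(g, '')))"

helper = create_authentication_helper()  # default: require_access_control=False
helper.build_security_filters({}, {"oid": "OID-123", "groups": []})
# -> None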
tests.test_authenticationhelper/test_auth_setup
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> "requireAccessControl": False,
# module: tests.test_authenticationhelper
def test_auth_setup(mock_confidential_client_success):
<0> helper = create_authentication_helper()
<1> assert helper.get_auth_setup_for_client() == {
<2> "useLogin": True,
<3> "msalConfig": {
<4> "auth": {
<5> "clientId": "CLIENT_APP",
<6> "authority": "https://login.microsoftonline.com/TENANT_ID",
<7> "redirectUri": "/redirect",
<8> "postLogoutRedirectUri": "/",
<9> "navigateToLoginRequestUrl": False,
<10> },
<11> "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False},
<12> },
<13> "loginRequest": {
<14> "scopes": [".default"],
<15> },
<16> "tokenRequest": {
<17> "scopes": ["api://SERVER_APP/access_as_user"],
<18> },
<19> }
===========changed ref 0=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 1=========== + # module: scripts.auth_common + + ===========changed ref 2=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 3=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 4=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 5=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 8=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 9=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 10=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", 
embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 11=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 12=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 13=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key
tests.test_authenticationhelper/test_get_auth_token
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<13>:<add> AuthenticationHelper.get_token_auth_header({"x-ms-token-aad-access-token": "MockToken"}) == "MockToken"
# module: tests.test_authenticationhelper
+ def test_get_auth_token(mock_confidential_client_success):
- def test_get_auth_token():
<0> with pytest.raises(AuthError) as exc_info:
<1> AuthenticationHelper.get_token_auth_header({})
<2> assert exc_info.value.status_code == 401
<3> with pytest.raises(AuthError) as exc_info:
<4> AuthenticationHelper.get_token_auth_header({"Authorization": ". ."})
<5> assert exc_info.value.status_code == 401
<6> with pytest.raises(AuthError) as exc_info:
<7> AuthenticationHelper.get_token_auth_header({"Authorization": "invalid"})
<8> assert exc_info.value.status_code == 401
<9> with pytest.raises(AuthError) as exc_info:
<10> AuthenticationHelper.get_token_auth_header({"Authorization": "invalid MockToken"})
<11> assert exc_info.value.status_code == 401
<12> assert AuthenticationHelper.get_token_auth_header({"Authorization": "Bearer MockToken"}) == "MockToken"
<13>
===========changed ref 0=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 1=========== # module: tests.test_authenticationhelper def test_auth_setup(mock_confidential_client_success): helper = create_authentication_helper() assert helper.get_auth_setup_for_client() == { "useLogin": True, + "requireAccessControl": False, "msalConfig": { "auth": { "clientId": "CLIENT_APP", "authority": "https://login.microsoftonline.com/TENANT_ID", "redirectUri": "/redirect", "postLogoutRedirectUri": "/", "navigateToLoginRequestUrl": False, }, "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, }, "loginRequest": { "scopes": [".default"], }, "tokenRequest": { "scopes": ["api://SERVER_APP/access_as_user"], }, } ===========changed ref 2=========== + # module: scripts.auth_common + + ===========changed ref 3=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 4=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 5=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 6=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 7=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 8=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: 
Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 11=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 13=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 14=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key
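===========illustrative sketch: token header parsing===========
Two details worth noting in the entry above: the updated test also accepts the `x-ms-token-aad-access-token` header that App Service built-in authentication injects, and the line added at <13> is a bare comparison with no `assert`, so as recorded it documents the intent without enforcing it. A parsing sketch consistent with the assertions (the real helper may differ):

class AuthError(Exception):
    def __init__(self, error, status_code):
        self.error = error
        self.status_code = status_code


def get_token_auth_header(headers: dict) -> str:
    # App Service's built-in auth forwards the access token in this header
    token = headers.get("x-ms-token-aad-access-token")
    if token:
        return token
    auth = headers.get("Authorization")
    if not auth:
        raise AuthError(error="Authorization header is expected", status_code=401)
    parts = auth.split()
    # Must be exactly "Bearer <token>"; ". .", "invalid", "invalid MockToken" all fail
    if len(parts) != 2 or parts[0].lower() != "bearer":
        raise AuthError(error="Authorization header must be 'Bearer <token>'", status_code=401)
    return parts[1]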
tests.test_authenticationhelper/test_build_security_filters
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<0>:<add> auth_helper = create_authentication_helper()
<add> auth_helper_require_access_control = create_authentication_helper(require_access_control=True)
<add> assert auth_helper.build_security_filters(overrides={}, auth_claims={}) is None
<del> assert AuthenticationHelper.build_security_filters(overrides={}, auth_claims={}) is None
<2>:<add> auth_helper_require_access_control.build_security_filters(overrides={}, auth_claims={})
<add> == "(oids/any(g:search.in(g, '')) or groups/any(g:search.in(g, '')))"
<add> )
<add> assert (
<del> AuthenticationHelper.build_security_filters(
<3>:<add> auth_helper.build_security_filters(overrides={"use_oid_security_filter": True}, auth_claims={"oid": "OID_X"})
<del> overrides={"use_oid_security_filter": True}, auth_claims={"oid": "OID_X"}
<4>:<del> )
<8>:<add> auth_helper_require_access_control.build_security_filters(overrides={}, auth_claims={"oid": "OID_X"})
<add> == "(oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, '')))"
# module: tests.test_authenticationhelper
+ def test_build_security_filters(mock_confidential_client_success):
- def test_build_security_filters():
<0> assert AuthenticationHelper.build_security_filters(overrides={}, auth_claims={}) is None
<1> assert (
<2> AuthenticationHelper.build_security_filters(
<3> overrides={"use_oid_security_filter": True}, auth_claims={"oid": "OID_X"}
<4> )
<5> == "oids/any(g:search.in(g, 'OID_X'))"
<6> )
<7> assert (
<8> AuthenticationHelper.build_security_filters(
<9> overrides={"use_groups_security_filter": True}, auth_claims={"groups": ["GROUP_Y", "GROUP_Z"]}
<10> )
<11> == "groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z'))"
<12> )
<13> assert (
<14> AuthenticationHelper.build_security_filters(
<15> overrides={"use_oid_security_filter": True, "use_groups_security_filter": True},
<16> auth_claims={"oid": "OID_X", "groups": ["GROUP_Y", "GROUP_Z"]},
<17> )
<18> == "(oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))"
<19> )
<20> assert (
<21> AuthenticationHelper.build_security_filters(
<22> overrides={"use_groups_security_filter": True}, auth_claims={"oid": "OID_X"}
<23> )
<24> == "groups/any(g:search.in(g, ''))"
<25> )
<26> assert (
<27> AuthenticationHelper.build_security_filters(
<28> overrides={"use_oid_security_filter": True}, auth_claims={"groups": ["GROUP_Y", "GROUP_Z"]}
<29> )
<30> == "oids/any(g:search.in(g, ''))"
<31> )
===========changed ref 0=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 1=========== # module: tests.test_authenticationhelper def test_auth_setup(mock_confidential_client_success): helper = create_authentication_helper() assert helper.get_auth_setup_for_client() == { "useLogin": True, + "requireAccessControl": False, "msalConfig": { "auth": { "clientId": "CLIENT_APP", "authority": "https://login.microsoftonline.com/TENANT_ID", "redirectUri": "/redirect", "postLogoutRedirectUri": "/", "navigateToLoginRequestUrl": False, }, "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, }, "loginRequest": { "scopes": [".default"], }, "tokenRequest": { "scopes": ["api://SERVER_APP/access_as_user"], }, } ===========changed ref 2=========== # module: tests.test_authenticationhelper + def test_auth_setup_required_access_control(mock_confidential_client_success): + helper = create_authentication_helper(require_access_control=True) + assert helper.get_auth_setup_for_client() == { + "useLogin": True, + "requireAccessControl": True, + "msalConfig": { + "auth": { + "clientId": "CLIENT_APP", + "authority": "https://login.microsoftonline.com/TENANT_ID", + "redirectUri": "/redirect", + "postLogoutRedirectUri": "/", + "navigateToLoginRequestUrl": False, + }, + "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, + }, + "loginRequest": { + "scopes": [".default"], + }, + "tokenRequest": { + "scopes": ["api://SERVER_APP/access_as_user"], + }, + } + ===========changed ref 3=========== # module: tests.test_authenticationhelper + def test_get_auth_token(mock_confidential_client_success): - def test_get_auth_token(): with pytest.raises(AuthError) as exc_info: AuthenticationHelper.get_token_auth_header({}) assert exc_info.value.status_code == 401 with pytest.raises(AuthError) as exc_info: AuthenticationHelper.get_token_auth_header({"Authorization": ". 
."}) assert exc_info.value.status_code == 401 with pytest.raises(AuthError) as exc_info: AuthenticationHelper.get_token_auth_header({"Authorization": "invalid"}) assert exc_info.value.status_code == 401 with pytest.raises(AuthError) as exc_info: AuthenticationHelper.get_token_auth_header({"Authorization": "invalid MockToken"}) assert exc_info.value.status_code == 401 assert AuthenticationHelper.get_token_auth_header({"Authorization": "Bearer MockToken"}) == "MockToken" + AuthenticationHelper.get_token_auth_header({"x-ms-token-aad-access-token": "MockToken"}) == "MockToken" ===========changed ref 4=========== + # module: scripts.auth_common + + ===========changed ref 5=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 6=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 7=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 8=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 9=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 10=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 11=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 12=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 13=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( 
search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", )
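===========illustrative sketch: security filter construction===========
The assertions in this entry fully determine the OData filter strings, including the new behavior where `require_access_control` applies both filters even when the caller requested neither. A free-function sketch that satisfies every assertion above (single-quote escaping omitted; the real implementation escapes quotes in claim values):

from typing import Optional


def build_security_filters(overrides: dict, auth_claims: dict, require_access_control: bool = False) -> Optional[str]:
    # require_access_control forces both filters on, regardless of overrides
    use_oid = require_access_control or overrides.get("use_oid_security_filter")
    use_groups = require_access_control or overrides.get("use_groups_security_filter")
    oid_filter = "oids/any(g:search.in(g, '{}'))".format(auth_claims.get("oid", "")) if use_oid else None
    groups_filter = (
        "groups/any(g:search.in(g, '{}'))".format(", ".join(auth_claims.get("groups", []))) if use_groups else None
    )
    if oid_filter and groups_filter:
        return f"({oid_filter} or {groups_filter})"
    return oid_filter or groups_filter  # None when no filter applies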
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<2>:<add> self.auth_helper = auth_helper
<s>_helper: AuthenticationHelper,
openai_client: AsyncOpenAI,
chatgpt_model: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
<0> self.search_client = search_client
<1> self.openai_client = openai_client
<2> self.chatgpt_model = chatgpt_model
<3> self.chatgpt_deployment = chatgpt_deployment
<4> self.embedding_deployment = embedding_deployment
<5> self.embedding_model = embedding_model
<6> self.sourcepage_field = sourcepage_field
<7> self.content_field = content_field
<8> self.query_language = query_language
<9> self.query_speller = query_speller
<10> self.chatgpt_token_limit = get_token_limit(chatgpt_model)
<11>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_authenticationhelper + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 8=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + 
===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 11=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 13=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 14=========== <s> auth_helper: AuthenticationHelper, gpt4v_deployment: Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_key: str, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_key = vision_key
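===========illustrative sketch: per-request filter assembly===========
With `auth_helper` now stored on each approach, `Approach.build_filter` (changed ref above) joins the category exclusion and the security filter with `and`. A usage sketch; `chat_approach` and the override values are hypothetical:

odata_filter = chat_approach.build_filter(
    overrides={"exclude_category": "internal", "use_oid_security_filter": True},
    auth_claims={"oid": "OID_X"},
)
assert odata_filter == "category ne 'internal' and oids/any(g:search.in(g, 'OID_X'))"

The two entries that follow repeat the same wiring for the retrieve-then-read and GPT-4V vision approaches.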
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> self.auth_helper = auth_helper
<s>_helper: AuthenticationHelper,
openai_client: AsyncOpenAI,
chatgpt_model: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
embedding_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
<0> self.search_client = search_client
<1> self.chatgpt_deployment = chatgpt_deployment
<2> self.openai_client = openai_client
<3> self.chatgpt_model = chatgpt_model
<4> self.embedding_model = embedding_model
<5> self.chatgpt_deployment = chatgpt_deployment
<6> self.embedding_deployment = embedding_deployment
<7> self.sourcepage_field = sourcepage_field
<8> self.content_field = content_field
<9> self.query_language = query_language
<10> self.query_speller = query_speller
<11>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_authenticationhelper + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 8=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + 
===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 11=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 13=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 14=========== <s>_helper: AuthenticationHelper, openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model)
app.backend.approaches.chatreadretrievereadvision/ChatReadRetrieveReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<3>:<add> self.auth_helper = auth_helper
<s>pt4v_deployment: Optional[str], # Not needed for non-Azure OpenAI
gpt4v_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
vision_endpoint: str,
vision_key: str,
):
<0> self.search_client = search_client
<1> self.blob_container_client = blob_container_client
<2> self.openai_client = openai_client
<3> self.gpt4v_deployment = gpt4v_deployment
<4> self.gpt4v_model = gpt4v_model
<5> self.embedding_deployment = embedding_deployment
<6> self.embedding_model = embedding_model
<7> self.sourcepage_field = sourcepage_field
<8> self.content_field = content_field
<9> self.query_language = query_language
<10> self.query_speller = query_speller
<11> self.vision_endpoint = vision_endpoint
<12> self.vision_key = vision_key
<13> self.chatgpt_token_limit = get_token_limit(gpt4v_model)
<14>
===========unchanged ref 0=========== at: approaches.approach.Approach __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_authenticationhelper + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 8=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + 
===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 11=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 13=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters) ===========changed ref 14=========== <s>_helper: AuthenticationHelper, openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client + self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.embedding_model = embedding_model self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<5>:<del> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
<7>:<add> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <8> approach: Approach <9> if use_gpt4v and CONFIG_ASK_VISION_APPROACH in current_app.config: <10> approach = cast(Approach, current_app.config[CONFIG_ASK_VISION_APPROACH]) <11> else: <12> approach = cast(Approach, current_app.config[CONFIG_ASK_APPROACH]) <13> r = await approach.run( <14> request_json["messages"], context=context, session_state=request_json.get("session_state") <15> ) <16> return jsonify(r) <17> except Exception as error: <18> return error_response(error, "/ask") <19>
===========unchanged ref 0=========== at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_ASK_VISION_APPROACH = "ask_vision_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") error_response(error: Exception, route: str, status_code: int=500) at: approaches.approach Approach(search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: approaches.approach.Approach run(messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any ===========changed ref 0=========== + # module: scripts.auth_common + + ===========changed ref 1=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 2=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 3=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 4=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 5=========== # module: tests.test_authenticationhelper + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 6=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 8=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 9=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with 
aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 10=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 11=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 12=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True + ===========changed ref 13=========== # module: app.backend.approaches.approach class Approach: def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]: exclude_category = overrides.get("exclude_category") or None + security_filter = self.auth_helper.build_security_filters(overrides, auth_claims) - security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) filters = [] if exclude_category: filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) if security_filter: filters.append(security_filter) return None if len(filters) == 0 else " and ".join(filters)
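===========illustrative sketch: error handling for auth failures===========
Moving the claims lookup inside the `try` block routes an `AuthError` raised during token validation through `error_response` instead of letting it escape as an unhandled exception; the `AuthError.__str__` addition (changed ref above) makes `str(error)` carry the message. A hedged sketch of such a handler, using only the signature shown in the unchanged refs (the real body is not part of this excerpt):

import logging

from quart import jsonify


def error_response(error: Exception, route: str, status_code: int = 500):
    # With the new __str__, str(error) yields AuthError.error for auth failures
    logging.exception("Exception in %s: %s", route, error)
    return jsonify({"error": str(error)}), status_code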
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
6392a8cee7a872d0a5d54c6d6af9b1f118bfe048
Automate Login Setup (#891)
<5>:<del> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
<6>:<del>
<8>:<add> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers)
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> <7> try: <8> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <9> approach: Approach <10> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: <11> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) <12> else: <13> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) <14> <15> result = await approach.run( <16> request_json["messages"], <17> stream=request_json.get("stream", False), <18> context=context, <19> session_state=request_json.get("session_state"), <20> ) <21> if isinstance(result, dict): <22> return jsonify(result) <23> else: <24> response = await make_response(format_as_ndjson(result)) <25> response.timeout = None # type: ignore <26> response.mimetype = "application/json-lines" <27> return response <28> except Exception as error: <29> return error_response(error, "/chat") <30>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") error_response(error: Exception, route: str, status_code: int=500) format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None] at: approaches.approach Approach(search_client: SearchClient, openai_client: AsyncOpenAI, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, openai_host: str) at: approaches.approach.Approach run(messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any ===========changed ref 0=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] - context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: + context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_ASK_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_ASK_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_ASK_APPROACH]) r = await approach.run( request_json["messages"], context=context, session_state=request_json.get("session_state") ) return jsonify(r) except Exception as error: return error_response(error, "/ask") ===========changed ref 1=========== + # module: scripts.auth_common + + ===========changed ref 2=========== + # module: scripts.auth_common + TIMEOUT = 60 + ===========changed ref 3=========== # module: app.backend.core.authentication # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API class AuthError(Exception): + def __str__(self) -> str: + return self.error or "" + ===========changed ref 4=========== + # module: scripts.auth_update + if __name__ == "__main__": + asyncio.run(main()) + ===========changed ref 5=========== + # module: scripts.auth_common + def get_auth_headers(credential: AsyncTokenCredential): + token_result = await credential.get_token("https://graph.microsoft.com/.default") + return {"Authorization": f"Bearer {token_result.token}"} + ===========changed ref 6=========== # module: tests.test_authenticationhelper + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 7=========== # module: tests.test_chatvisionapproach + MockSearchIndex = SearchIndex( + name="test", + fields=[ + SearchField(name="oids", type="Collection(Edm.String)"), + SearchField(name="groups", type="Collection(Edm.String)"), + ], + ) ===========changed ref 8=========== <s>.approaches.approach class Approach: def __init__( self, search_client: SearchClient, openai_client: AsyncOpenAI, + auth_helper: AuthenticationHelper, query_language: Optional[str], 
query_speller: Optional[str], embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, openai_host: str, ): self.search_client = search_client self.openai_client = openai_client + self.auth_helper = auth_helper self.query_language = query_language self.query_speller = query_speller self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host ===========changed ref 9=========== # module: tests.test_authenticationhelper + def create_authentication_helper(require_access_control: bool = False): - def create_authentication_helper(): return AuthenticationHelper( + search_index=MockSearchIndex, use_authentication=True, server_app_id="SERVER_APP", server_app_secret="SERVER_SECRET", client_app_id="CLIENT_APP", tenant_id="TENANT_ID", + require_access_control=require_access_control, - token_cache_path=None, ) ===========changed ref 10=========== + # module: scripts.auth_common + def get_application(auth_headers: Dict[str, str], app_id: str) -> Optional[str]: + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.get(f"https://graph.microsoft.com/v1.0/applications(appId='{app_id}')") as response: + if response.status == 200: + response_json = await response.json() + return response_json["id"] + + return None + ===========changed ref 11=========== + # module: scripts.auth_common + def update_application(auth_headers: Dict[str, str], object_id: str, app_payload: object): + async with aiohttp.ClientSession(headers=auth_headers, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session: + async with session.patch( + f"https://graph.microsoft.com/v1.0/applications/{object_id}", json=app_payload + ) as response: + if not response.ok: + response_json = await response.json() + raise Exception(response_json) + + return True + ===========changed ref 12=========== # module: tests.test_chatapproach @pytest.fixture def chat_approach(): return ChatReadRetrieveReadApproach( search_client=None, + auth_helper=None, openai_client=None, chatgpt_model="gpt-35-turbo", chatgpt_deployment="chat", embedding_deployment="embeddings", embedding_model="text-", sourcepage_field="", content_field="", query_language="en-us", query_speller="lexicon", ) ===========changed ref 13=========== + # module: scripts.auth_common + def test_authentication_enabled(): + use_authentication = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" + require_access_control = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" + if require_access_control and not use_authentication: + print("AZURE_ENFORCE_ACCESS_CONTROL is true, but AZURE_USE_AUTHENTICATION is false. Stopping...") + return False + + if not use_authentication: + return False + + return True +
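Note: the /chat route above either returns a plain JSON body or streams the result through format_as_ndjson. The name and signature come from the unchanged refs; the body below is a minimal sketch of what such a helper can look like, and the repository's actual implementation may add error handling around the loop:

import json
from typing import AsyncGenerator

async def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # Emit one JSON document per line so the client can parse each
    # chunk as soon as it arrives instead of waiting for the full body.
    async for event in r:
        yield json.dumps(event, ensure_ascii=False) + "\n"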
scripts.prepdocslib.filestrategy/FileStrategy.run
Modified
Azure-Samples~azure-search-openai-demo
c02908c2e074c79fe99563aac3d9b56e717fa19b
[BugFix] Fix logic in prepdocs to always update search index (#1066)
<12>:<add> <del> image_embeddings: Optional[List[List[float]]] <14>:<add> blob_image_embeddings: Optional[List[List[float]]] = None <15>:<add> blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <del> image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <16>:<add> await search_manager.update_content(sections, blob_image_embeddings) <del> await search_manager.update_content(sections, image_embeddings)
# module: scripts.prepdocslib.filestrategy class FileStrategy(Strategy): def run(self, search_info: SearchInfo): <0> search_manager = SearchManager(search_info, self.search_analyzer_name, self.use_acls, self.embeddings) <1> if self.document_action == DocumentAction.Add: <2> files = self.list_file_strategy.list() <3> async for file in files: <4> try: <5> pages = [page async for page in self.pdf_parser.parse(content=file.content)] <6> if search_info.verbose: <7> print(f"Splitting '{file.filename()}' into sections") <8> sections = [ <9> Section(split_page, content=file, category=self.category) <10> for split_page in self.text_splitter.split_pages(pages) <11> ] <12> image_embeddings: Optional[List[List[float]]] <13> blob_sas_uris = await self.blob_manager.upload_blob(file) <14> if self.image_embeddings and blob_sas_uris: <15> image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris) <16> await search_manager.update_content(sections, image_embeddings) <17> finally: <18> if file: <19> file.close() <20> elif self.document_action == DocumentAction.Remove: <21> paths = self.list_file_strategy.list_paths() <22> async for path in paths: <23> await self.blob_manager.remove_blob(path) <24> await search_manager.remove_content(path) <25> elif self.document_action == DocumentAction.RemoveAll: <26> await self.blob_manager.remove_blob() <27> await search_manager.remove_content() <28>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager.BlobManager upload_blob(file: File) -> Optional[List[str]] remove_blob(path: Optional[str]=None) at: scripts.prepdocslib.embeddings.ImageEmbeddings create_embeddings(blob_urls: List[str]) -> List[List[float]] at: scripts.prepdocslib.filestrategy DocumentAction() at: scripts.prepdocslib.filestrategy.FileStrategy.__init__ self.list_file_strategy = list_file_strategy self.blob_manager = blob_manager self.pdf_parser = pdf_parser self.text_splitter = text_splitter self.document_action = document_action self.embeddings = embeddings self.image_embeddings = image_embeddings self.search_analyzer_name = search_analyzer_name self.use_acls = use_acls self.category = category at: scripts.prepdocslib.listfilestrategy.ListFileStrategy list() -> AsyncGenerator[File, None] list_paths() -> AsyncGenerator[str, None] at: scripts.prepdocslib.pdfparser.PdfParser parse(content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) SearchManager(search_info: SearchInfo, search_analyzer_name: Optional[str]=None, use_acls: bool=False, embeddings: Optional[OpenAIEmbeddings]=None, search_images: bool=False) at: scripts.prepdocslib.searchmanager.SearchManager update_content(sections: List[Section], image_embeddings: Optional[List[List[float]]]=None) remove_content(path: Optional[str]=None) ===========unchanged ref 1=========== at: scripts.prepdocslib.strategy SearchInfo(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], index_name: str, verbose: bool=False) at: scripts.prepdocslib.strategy.SearchInfo.__init__ self.verbose = verbose at: scripts.prepdocslib.strategy.Strategy run(self, search_info: SearchInfo) at: scripts.prepdocslib.textsplitter.TextSplitter split_pages(pages: List[Page]) -> Generator[SplitPage, None, None] at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): + def create_client(self) -> AsyncOpenAI: + return AsyncOpenAI(api_key=self.credential, organization=self.organization) + ===========changed ref 1=========== # module: scripts.prepdocslib.embeddings class OpenAIEmbeddingService(OpenAIEmbeddings): - def create_embedding_arguments(self) -> dict[str, Any]: - return { - "model": self.open_ai_model_name, - "api_key": self.credential, - "api_type": "openai", - "organization": self.organization, - } -
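Note: before this fix, the update_content call sat inside the image-embeddings branch, so ingestion runs without image embeddings never wrote any sections to the search index. Reassembling the ground-truth hunks above, the post-fix body of the Add branch reads approximately (all names are taken from the snippet itself):

blob_sas_uris = await self.blob_manager.upload_blob(file)

# Image embeddings are optional: compute them only when the service is
# configured and the upload produced SAS URIs for the page images.
blob_image_embeddings: Optional[List[List[float]]] = None
if self.image_embeddings and blob_sas_uris:
    blob_image_embeddings = await self.image_embeddings.create_embeddings(blob_sas_uris)

# The index update now runs unconditionally, with or without images.
await search_manager.update_content(sections, blob_image_embeddings)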
scripts.prepdocslib.blobmanager/BlobManager.upload_pdf_blob_images
Modified
Azure-Samples~azure-search-openai-demo
6cc78369938e463085ae777bc83a82adc27e40fe
[BugFix] Use default font when a specific font cannot be found (#1069)
<7>:<add> <add> font = None <add> try: <add> font = ImageFont.truetype("arial.ttf", 20) <add> except OSError: <add> try: <add> font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20) <add> except OSError: <add> print("\tUnable to find arial.ttf or FreeMono.ttf, using default font") <26>:<del> try: <27>:<del> font = ImageFont.truetype("arial.ttf", 20) <28>:<del> except OSError: <29>:<del> font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20)
# module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: <0> with open(file.content.name, "rb") as reopened_file: <1> reader = PdfReader(reopened_file) <2> page_count = len(reader.pages) <3> doc = fitz.open(file.content.name) <4> sas_uris = [] <5> start_time = datetime.datetime.now(datetime.timezone.utc) <6> expiry_time = start_time + datetime.timedelta(days=1) <7> <8> for i in range(page_count): <9> blob_name = BlobManager.blob_image_name_from_file_page(file.content.name, i) <10> if self.verbose: <11> print(f"\tConverting page {i} to image and uploading -> {blob_name}") <12> <13> doc = fitz.open(file.content.name) <14> page = doc.load_page(i) <15> pix = page.get_pixmap() <16> original_img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) # type: ignore <17> <18> # Create a new image with additional space for text <19> text_height = 40 # Height of the text area <20> new_img = Image.new("RGB", (original_img.width, original_img.height + text_height), "white") <21> <22> # Paste the original image onto the new image <23> new_img.paste(original_img, (0, text_height)) <24> <25> # Draw the text on the white area <26> try: <27> font = ImageFont.truetype("arial.ttf", 20) <28> except OSError: <29> font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20) <30> draw = ImageDraw.Draw(new_img) <31> </s>
===========below chunk 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_pdf_blob_images( self, service_client: BlobServiceClient, container_client: ContainerClient, file: File ) -> List[str]: # offset: 1 # 10 pixels from the top and left of the image x = 10 y = 10 draw.text((x, y), text, font=font, fill="black") output = io.BytesIO() new_img.save(output, format="PNG") output.seek(0) blob_client = await container_client.upload_blob(blob_name, output, overwrite=True) if not self.user_delegation_key: self.user_delegation_key = await service_client.get_user_delegation_key(start_time, expiry_time) if blob_client.account_name is not None: sas_token = generate_blob_sas( account_name=blob_client.account_name, container_name=blob_client.container_name, blob_name=blob_client.blob_name, user_delegation_key=self.user_delegation_key, permission=BlobSasPermissions(read=True), expiry=expiry_time, start=start_time, ) sas_uris.append(f"{blob_client.url}?{sas_token}") return sas_uris
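Note: with this fix, font stays None when neither typeface can be loaded, and Pillow's ImageDraw.text falls back to its built-in default font when font is None. A standalone sketch of the same fallback chain; the function name is made up for illustration, and the font paths are the ones tried above, which need not exist on every system:

from typing import Optional

from PIL import ImageFont

def load_caption_font(size: int = 20) -> Optional[ImageFont.FreeTypeFont]:
    # Try a common Windows font name first, then a typical Linux path.
    for candidate in ("arial.ttf", "/usr/share/fonts/truetype/freefont/FreeMono.ttf"):
        try:
            return ImageFont.truetype(candidate, size)
        except OSError:
            continue
    # Returning None lets ImageDraw.text() pick Pillow's default font.
    print("\tUnable to find arial.ttf or FreeMono.ttf, using default font")
    return None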
app.backend.core.authentication/AuthenticationHelper.build_security_filters
Modified
Azure-Samples~azure-search-openai-demo
e518ab0589af8d8704761d85db68ec4dffbfef55
bugfix:in app/backend/core/authenticatio.py/119line-'use_oid_security_filter' does not work (#1086)
<6>:<add> if (use_oid_security_filter or use_groups_security_filter) and not self.has_auth_fields: <del> if (use_oid_security_filter or use_oid_security_filter) and not self.has_auth_fields:
# module: app.backend.core.authentication class AuthenticationHelper: def build_security_filters(self, overrides: dict[str, Any], auth_claims: dict[str, Any]): <0> # Build different permutations of the oid or groups security filter using OData filters <1> # https://learn.microsoft.com/azure/search/search-security-trimming-for-azure-search <2> # https://learn.microsoft.com/azure/search/search-query-odata-filter <3> use_oid_security_filter = self.require_access_control or overrides.get("use_oid_security_filter") <4> use_groups_security_filter = self.require_access_control or overrides.get("use_groups_security_filter") <5> <6> if (use_oid_security_filter or use_oid_security_filter) and not self.has_auth_fields: <7> raise AuthError( <8> error="oids and groups must be defined in the search index to use authentication", status_code=400 <9> ) <10> <11> oid_security_filter = ( <12> "oids/any(g:search.in(g, '{}'))".format(auth_claims.get("oid") or "") if use_oid_security_filter else None <13> ) <14> groups_security_filter = ( <15> "groups/any(g:search.in(g, '{}'))".format(", ".join(auth_claims.get("groups") or [])) <16> if use_groups_security_filter <17> else None <18> ) <19> <20> # If only one security filter is specified, return that filter <21> # If both security filters are specified, combine them with "or" so only 1 security filter needs to pass <22> # If no security filters are specified, don't return any filter <23> if oid_security_filter and not groups_security_filter: <24> return oid_security_filter <25> elif groups_security_filter and not oid_security_filter: <26> return groups_security_filter <27> elif oid_security_filter and groups_security_filter: <28> return f"({oid_security_filter} or {groups_security</s>
===========below chunk 0=========== # module: app.backend.core.authentication class AuthenticationHelper: def build_security_filters(self, overrides: dict[str, Any], auth_claims: dict[str, Any]): # offset: 1 else: return None ===========unchanged ref 0=========== at: app.backend.core.authentication AuthError(error, status_code) at: app.backend.core.authentication.AuthenticationHelper scope: str = "https://graph.microsoft.com/.default" at: app.backend.core.authentication.AuthenticationHelper.__init__ self.has_auth_fields = "oids" in field_names and "groups" in field_names self.has_auth_fields = False self.require_access_control = False self.require_access_control = require_access_control at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
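Note: the filters built above use the search.in OData function from Azure AI Search. A quick illustration of the strings the method produces; the oid and group values are made up, and the format strings are copied from the code above:

# Hypothetical claims payload for illustration only.
auth_claims = {"oid": "OID_X", "groups": ["GROUP_Y", "GROUP_Z"]}

oid_filter = "oids/any(g:search.in(g, '{}'))".format(auth_claims.get("oid") or "")
groups_filter = "groups/any(g:search.in(g, '{}'))".format(", ".join(auth_claims.get("groups") or []))

print(oid_filter)     # oids/any(g:search.in(g, 'OID_X'))
print(groups_filter)  # groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z'))
# When both filters apply, they are joined with "or" so passing either one is enough:
print(f"({oid_filter} or {groups_filter})")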
tests.test_authenticationhelper/test_auth_setup
Modified
Azure-Samples~azure-search-openai-demo
0b8724ad761c1d93489ff254bc3a66ad3fceedf2
Update Authentication to use local storage and easy auth refresh (#1117)
<12>:<add> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <del> "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False},
# module: tests.test_authenticationhelper def test_auth_setup(mock_confidential_client_success): <0> helper = create_authentication_helper() <1> assert helper.get_auth_setup_for_client() == { <2> "useLogin": True, <3> "requireAccessControl": False, <4> "msalConfig": { <5> "auth": { <6> "clientId": "CLIENT_APP", <7> "authority": "https://login.microsoftonline.com/TENANT_ID", <8> "redirectUri": "/redirect", <9> "postLogoutRedirectUri": "/", <10> "navigateToLoginRequestUrl": False, <11> }, <12> "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, <13> }, <14> "loginRequest": { <15> "scopes": [".default"], <16> }, <17> "tokenRequest": { <18> "scopes": ["api://SERVER_APP/access_as_user"], <19> }, <20> } <21>
===========unchanged ref 0=========== at: tests.conftest mock_confidential_client_success(monkeypatch) at: tests.test_authenticationhelper create_authentication_helper(require_access_control: bool=False)
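Note: the expected payload now pins cacheLocation to localStorage. Unlike sessionStorage, tokens cached in localStorage survive closing the tab or window, which is what lets the client refresh a session silently instead of forcing a fresh login. Below is a hypothetical reduction of how the server side could assemble the msalConfig block this test asserts on; the real AuthenticationHelper may build it differently:

def build_msal_config(client_app_id: str, tenant_id: str) -> dict:
    return {
        "auth": {
            "clientId": client_app_id,
            "authority": f"https://login.microsoftonline.com/{tenant_id}",
            "redirectUri": "/redirect",
            "postLogoutRedirectUri": "/",
            "navigateToLoginRequestUrl": False,
        },
        # localStorage persists across tabs and browser restarts;
        # sessionStorage (the previous value) is scoped to one tab.
        "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False},
    }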
tests.test_authenticationhelper/test_auth_setup_required_access_control
Modified
Azure-Samples~azure-search-openai-demo
0b8724ad761c1d93489ff254bc3a66ad3fceedf2
Update Authentication to use local storage and easy auth refresh (#1117)
<12>:<add> "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, <del> "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False},
# module: tests.test_authenticationhelper def test_auth_setup_required_access_control(mock_confidential_client_success): <0> helper = create_authentication_helper(require_access_control=True) <1> assert helper.get_auth_setup_for_client() == { <2> "useLogin": True, <3> "requireAccessControl": True, <4> "msalConfig": { <5> "auth": { <6> "clientId": "CLIENT_APP", <7> "authority": "https://login.microsoftonline.com/TENANT_ID", <8> "redirectUri": "/redirect", <9> "postLogoutRedirectUri": "/", <10> "navigateToLoginRequestUrl": False, <11> }, <12> "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, <13> }, <14> "loginRequest": { <15> "scopes": [".default"], <16> }, <17> "tokenRequest": { <18> "scopes": ["api://SERVER_APP/access_as_user"], <19> }, <20> } <21>
===========unchanged ref 0=========== at: tests.test_authenticationhelper create_authentication_helper(require_access_control: bool=False) ===========changed ref 0=========== # module: tests.test_authenticationhelper def test_auth_setup(mock_confidential_client_success): helper = create_authentication_helper() assert helper.get_auth_setup_for_client() == { "useLogin": True, "requireAccessControl": False, "msalConfig": { "auth": { "clientId": "CLIENT_APP", "authority": "https://login.microsoftonline.com/TENANT_ID", "redirectUri": "/redirect", "postLogoutRedirectUri": "/", "navigateToLoginRequestUrl": False, }, + "cache": {"cacheLocation": "localStorage", "storeAuthStateInCookie": False}, - "cache": {"cacheLocation": "sessionStorage", "storeAuthStateInCookie": False}, }, "loginRequest": { "scopes": [".default"], }, "tokenRequest": { "scopes": ["api://SERVER_APP/access_as_user"], }, }
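Note: the two tests above differ only in require_access_control and the resulting requireAccessControl flag. If deduplication is ever wanted, a pytest.mark.parametrize sketch over the shared assertions; the test name is made up, and the expected shapes are taken from the payloads asserted above:

import pytest

@pytest.mark.parametrize("require_access_control", [False, True])
def test_auth_setup_access_control_flag(mock_confidential_client_success, require_access_control):
    helper = create_authentication_helper(require_access_control=require_access_control)
    setup = helper.get_auth_setup_for_client()
    assert setup["useLogin"] is True
    assert setup["requireAccessControl"] == require_access_control
    assert setup["msalConfig"]["cache"] == {
        "cacheLocation": "localStorage",
        "storeAuthStateInCookie": False,
    }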