Dataset schema (column name, feature type, observed length / cardinality):

path            stringlengths   9-117
type            stringclasses   2 values
project         stringclasses   10 values
commit_hash     stringlengths   40-40
commit_message  stringlengths   1-137
ground_truth    stringlengths   0-2.74k
main_code       stringlengths   102-3.37k
context         stringlengths   0-14.7k
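The ground_truth fields in the records below use a compact hunk notation: a target line tag such as <0>:, followed by <add> and <del> segments. A minimal parsing sketch for one hunk; the regexes, the sample hunk string, and the whole parser are illustrative assumptions, not part of any dataset tooling:

```python
import re

# Hypothetical sample hunk in the notation used by ground_truth below.
hunk = '<0>:<add> x = normalize(x) <del> x = x'

# Split off the target line index, then collect add/del segments.
line_no, rest = re.match(r"<(\d+)>:(.*)", hunk).groups()
adds = re.findall(r"<add>\s*(.*?)\s*(?=<add>|<del>|$)", rest)
dels = re.findall(r"<del>\s*(.*?)\s*(?=<add>|<del>|$)", rest)
print(line_no, adds, dels)  # 0 ['x = normalize(x)'] ['x = x']
```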
app.backend.core.messagebuilder/MessageBuilder.__init__
Modified
Azure-Samples~azure-search-openai-demo
969272dd68d2bde4a3f4172f1bc330d7cfe3ba3f
Normalize text in messages to reduce token length (#688)
<0>:<add> self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] <del> self.messages = [{"role": "system", "content": system_content}]
# module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): <0> self.messages = [{"role": "system", "content": system_content}] <1> self.model = chatgpt_model <2> self.token_length = num_tokens_from_messages(self.messages[-1], self.model) <3>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder normalize_content(content: str) ===========changed ref 0=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"} + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 4 + ===========changed ref 1=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode_append(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + builder.append_message("user", "a\u0301") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"}, + # 1 token, 1 token, 1 token, 1 token + {"role": "user", "content": "á"}, + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 8 + ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def get_messages_from_history( self, system_prompt: str, model_id: str, history: list[dict[str, str]], + user_content: str, - user_conv: str, few_shots=[], max_tokens: int = 4096, ) -> list: message_builder = MessageBuilder(system_prompt, model_id) # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. for shot in few_shots: message_builder.append_message(shot.get("role"), shot.get("content")) - user_content = user_conv append_index = len(few_shots) + 1 message_builder.append_message(self.USER, user_content, index=append_index) for h in reversed(history[:-1]): + if message_builder.token_length > max_tokens: + break if bot_msg := h.get("bot"): message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index) if user_msg := h.get("user"): message_builder.append_message(self.USER, user_msg, index=append_index) - if message_builder.token_length > max_tokens: - break + return message_builder.messages - messages = message_builder.messages - return messages ===========changed ref 3=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + messages = chat_approach.get_messages_from_history( + system_prompt="You are a bot.", + model_id="gpt-35-turbo", + history=[ + { + "user": "What happens in a performance review?", + "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + }, + {"user": "What does a Product Manager do?"}, + ], + user_content="What does a Product Manager do?", + max_tokens=10, + ) + assert messages == [ + {"role": "system", "content": "You are a bot."}, + {"role": "user", "content": "What does a Product Manager do?"}, + ] + ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + messages = chat_approach.get_messages_from_history( + system_prompt="You are a bot.", + model_id="gpt-35-turbo", + history=[ + { + "user": "What happens in a performance review?", + "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + }, + { + "user": "Is there a dress code?", + "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", + }, + {"user": "What does a Product Manager do?"}, + ], + user_content="What does a Product Manager do?", + max_tokens=30, + ) + assert messages == [ + {"role": "system", "content": "You are a bot."}, + {"role": "user", "content": "Is there a dress code?"}, + { + "role": "assistant", + "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", + }, + {"role": "user", "content":</s> ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(): # offset: 1 <s>! [employee_handbook-1.pdf]", + }, + {"role": "user", "content": "What does a Product Manager do?"}, + ] +
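The ground truth above routes every message through normalize_content, and the unicode tests expect "a\u0301" to collapse to "á". A minimal sketch of a normalizer consistent with those tests, assuming Unicode NFC composition; the record does not show the actual implementation:

```python
import unicodedata


def normalize_content(content: str) -> str:
    # Assumption: NFC normalization. It composes "a" + U+0301 (combining
    # acute accent) into the single code point "á", so the tokenizer sees
    # fewer characters and the message costs fewer tokens.
    return unicodedata.normalize("NFC", content)


assert normalize_content("a\u0301") == "\u00e1"  # "á"
```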
app.backend.core.messagebuilder/MessageBuilder.append_message
Modified
Azure-Samples~azure-search-openai-demo
969272dd68d2bde4a3f4172f1bc330d7cfe3ba3f
Normalize text in messages to reduce token length (#688)
<0>:<add> self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) <del> self.messages.insert(index, {"role": role, "content": content})
# module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): <0> self.messages.insert(index, {"role": role, "content": content}) <1> self.token_length += num_tokens_from_messages(self.messages[index], self.model) <2>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model at: app.backend.core.messagebuilder.MessageBuilder.append_message self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): + self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] - self.messages = [{"role": "system", "content": system_content}] self.model = chatgpt_model self.token_length = num_tokens_from_messages(self.messages[-1], self.model) ===========changed ref 1=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"} + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 4 + ===========changed ref 2=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode_append(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + builder.append_message("user", "a\u0301") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"}, + # 1 token, 1 token, 1 token, 1 token + {"role": "user", "content": "á"}, + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 8 + ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def get_messages_from_history( self, system_prompt: str, model_id: str, history: list[dict[str, str]], + user_content: str, - user_conv: str, few_shots=[], max_tokens: int = 4096, ) -> list: message_builder = MessageBuilder(system_prompt, model_id) # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. for shot in few_shots: message_builder.append_message(shot.get("role"), shot.get("content")) - user_content = user_conv append_index = len(few_shots) + 1 message_builder.append_message(self.USER, user_content, index=append_index) for h in reversed(history[:-1]): + if message_builder.token_length > max_tokens: + break if bot_msg := h.get("bot"): message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index) if user_msg := h.get("user"): message_builder.append_message(self.USER, user_msg, index=append_index) - if message_builder.token_length > max_tokens: - break + return message_builder.messages - messages = message_builder.messages - return messages ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + messages = chat_approach.get_messages_from_history( + system_prompt="You are a bot.", + model_id="gpt-35-turbo", + history=[ + { + "user": "What happens in a performance review?", + "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + }, + {"user": "What does a Product Manager do?"}, + ], + user_content="What does a Product Manager do?", + max_tokens=10, + ) + assert messages == [ + {"role": "system", "content": "You are a bot."}, + {"role": "user", "content": "What does a Product Manager do?"}, + ] + ===========changed ref 5=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + messages = chat_approach.get_messages_from_history( + system_prompt="You are a bot.", + model_id="gpt-35-turbo", + history=[ + { + "user": "What happens in a performance review?", + "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + }, + { + "user": "Is there a dress code?", + "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", + }, + {"user": "What does a Product Manager do?"}, + ], + user_content="What does a Product Manager do?", + max_tokens=30, + ) + assert messages == [ + {"role": "system", "content": "You are a bot."}, + {"role": "user", "content": "Is there a dress code?"}, + { + "role": "assistant", + "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", + }, + {"role": "user", "content":</s> ===========changed ref 6=========== # module: tests.test_chatapproach + def test_get_messages_from_history_truncated_longer(): # offset: 1 <s>! [employee_handbook-1.pdf]", + }, + {"role": "user", "content": "What does a Product Manager do?"}, + ] +
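append_message inserts at a fixed index rather than appending at the end, and get_messages_from_history above relies on that: walking history newest-first while inserting at one index yields chronological order. A toy illustration of that ordering, ignoring token counting and few-shots (all names here are stand-ins, not project code):

```python
# Start with the system message, as MessageBuilder.__init__ does.
messages = [{"role": "system", "content": "You are a bot."}]


def append_message(role: str, content: str, index: int = 1) -> None:
    # Insert at a fixed position; later inserts push earlier ones down.
    messages.insert(index, {"role": role, "content": content})


append_message("user", "newest question")  # the final user turn
for turn in ["previous answer", "previous question"]:  # newest-first history
    append_message("assistant" if "answer" in turn else "user", turn)

assert [m["content"] for m in messages] == [
    "You are a bot.", "previous question", "previous answer", "newest question",
]
```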
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
c505750f4d4090e2e58baf908b36d33a3fc352a2
Conform to Chat App specification for responses (#679)
<2>:<add> self.responses = [ <del> self.num = 2 <3>:<add> {"object": "chat.completion.chunk", "choices": []}, <add> {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, <add> {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, <add> ] <del> self.answer = answer <9>:<del> if self.num == 2: <10>:<del> self.num -= 1 <11>:<del> # Emulate the first response being empty - bug with "2023-07-01-preview" <12>:<del> return openai.util.convert_to_openai_object({"choices": []}) <13>:<del> elif self.num == 1: <14>:<del> self.num -= 1 <15>:<del> return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) <16>:<add> if self.responses: <add> return self.responses.pop(0) <32>:<add> return openai.util.convert_to_openai_object( <add> {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} <add> ) <del> return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]})
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer): <2> self.num = 2 <3> self.answer = answer <4> <5> def __aiter__(self): <6> return self <7> <8> async def __anext__(self): <9> if self.num == 2: <10> self.num -= 1 <11> # Emulate the first response being empty - bug with "2023-07-01-preview" <12> return openai.util.convert_to_openai_object({"choices": []}) <13> elif self.num == 1: <14> self.num -= 1 <15> return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) <16> else: <17> raise StopAsyncIteration <18> <19> async def mock_acreate(*args, **kwargs): <20> if openai.api_type == "openai": <21> assert kwargs.get("deployment_id") is None <22> else: <23> assert kwargs.get("deployment_id") is not None <24> messages = kwargs["messages"] <25> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <26> answer = "capital of France" <27> else: <28> answer = "The capital of France is Paris." <29> if "stream" in kwargs and kwargs["stream"] is True: <30> return AsyncChatCompletionIterator(answer) <31> else: <32> return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) <33> <34> monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) <35>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: openai api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.util convert_to_openai_object(resp, api_key=None, api_version=None, organization=None, engine=None, plain_old_data=False) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
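The refactor above replaces counter-based state with a response queue that is popped until exhausted. A self-contained sketch of consuming such a queue-backed async iterator; class and variable names are illustrative, not the project's test code:

```python
import asyncio


class FakeStream:
    """Queue-backed async iterator, in the style of the refactored mock."""

    def __init__(self, chunks: list[dict]):
        self.responses = list(chunks)

    def __aiter__(self):
        return self

    async def __anext__(self) -> dict:
        if self.responses:
            return self.responses.pop(0)  # serve chunks in order
        raise StopAsyncIteration


async def main() -> None:
    stream = FakeStream([{"choices": []}, {"choices": [{"delta": {"content": "hi"}}]}])
    async for chunk in stream:
        print(chunk)


asyncio.run(main())
```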
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
c505750f4d4090e2e58baf908b36d33a3fc352a2
Conform to Chat App specification for responses (#679)
<4>:<del> chat_content = chat_resp.choices[0].message.content <5>:<del> extra_info["answer"] = chat_content <6>:<del> return extra_info <7>:<add> chat_resp.choices[0]["extra_args"] = extra_info <add> return chat_resp
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=False <2> ) <3> chat_resp = await chat_coroutine <4> chat_content = chat_resp.choices[0].message.content <5> extra_info["answer"] = chat_content <6> return extra_info <7>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple ===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): class AsyncChatCompletionIterator: def __init__(self, answer): + self.responses = [ - self.num = 2 + {"object": "chat.completion.chunk", "choices": []}, + {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, + {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, + ] - self.answer = answer def __aiter__(self): return self async def __anext__(self): - if self.num == 2: - self.num -= 1 - # Emulate the first response being empty - bug with "2023-07-01-preview" - return openai.util.convert_to_openai_object({"choices": []}) - elif self.num == 1: - self.num -= 1 - return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) + if self.responses: + return self.responses.pop(0) else: raise StopAsyncIteration async def mock_acreate(*args, **kwargs): if openai.api_type == "openai": assert kwargs.get("deployment_id") is None else: assert kwargs.get("deployment_id") is not None messages = kwargs["messages"] if messages[-1]["content"] == "Generate search query for: What is the capital of France?": answer = "capital of France" else: answer = "The capital of France is Paris." if "stream" in kwargs and kwargs["stream"] is True: return AsyncChatCompletionIterator(answer) else: + return openai.util.convert_to_openai_object( + {"object": "chat.completion", "choices": [</s> ===========changed ref 1=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): # offset: 1 <s> return openai.util.convert_to_openai_object( + {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} + ) - return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate)
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
c505750f4d4090e2e58baf908b36d33a3fc352a2
Conform to Chat App specification for responses (#679)
<3>:<add> yield { <add> "choices": [ <add> {"delta": {"role": self.ASSISTANT}, "extra_args": extra_info, "finish_reason": None, "index": 0} <add> ], <add> "object": "chat.completion.chunk", <add> } <add> <del> yield extra_info
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=True <2> ) <3> yield extra_info <4> async for event in await chat_coroutine: <5> # "2023-07-01-preview" API version has a bug where first response has empty choices <6> if event["choices"]: <7> yield event <8>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = await chat_coroutine - chat_content = chat_resp.choices[0].message.content - extra_info["answer"] = chat_content - return extra_info + chat_resp.choices[0]["extra_args"] = extra_info + return chat_resp ===========changed ref 1=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): class AsyncChatCompletionIterator: def __init__(self, answer): + self.responses = [ - self.num = 2 + {"object": "chat.completion.chunk", "choices": []}, + {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, + {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, + ] - self.answer = answer def __aiter__(self): return self async def __anext__(self): - if self.num == 2: - self.num -= 1 - # Emulate the first response being empty - bug with "2023-07-01-preview" - return openai.util.convert_to_openai_object({"choices": []}) - elif self.num == 1: - self.num -= 1 - return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) + if self.responses: + return self.responses.pop(0) else: raise StopAsyncIteration async def mock_acreate(*args, **kwargs): if openai.api_type == "openai": assert kwargs.get("deployment_id") is None else: assert kwargs.get("deployment_id") is not None messages = kwargs["messages"] if messages[-1]["content"] == "Generate search query for: What is the capital of France?": answer = "capital of France" else: answer = "The capital of France is Paris." if "stream" in kwargs and kwargs["stream"] is True: return AsyncChatCompletionIterator(answer) else: + return openai.util.convert_to_openai_object( + {"object": "chat.completion", "choices": [</s> ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): # offset: 1 <s> return openai.util.convert_to_openai_object( + {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} + ) - return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate)
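The generator now emits a first chunk carrying the assistant role and extra_args, followed by content deltas. A sketch of reassembling such a stream on the client side; the chunk dicts are hand-built to match the shapes shown in the diff, not captured output:

```python
chunks = [
    {"object": "chat.completion.chunk",
     "choices": [{"delta": {"role": "assistant"}, "extra_args": {"data_points": []},
                  "finish_reason": None, "index": 0}]},
    {"object": "chat.completion.chunk",
     "choices": [{"delta": {"content": "Paris."}}]},
]

answer = ""
extra_args = None
for chunk in chunks:
    if not chunk["choices"]:  # empty first chunk from "2023-07-01-preview" is skipped upstream
        continue
    choice = chunk["choices"][0]
    extra_args = choice.get("extra_args", extra_args)  # captured from the first real chunk
    answer += choice["delta"].get("content", "")       # accumulate the content deltas
print(answer, extra_args)
```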
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
c505750f4d4090e2e58baf908b36d33a3fc352a2
Conform to Chat App specification for responses (#679)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top", 3) <4> filter = self.build_filter(overrides, auth_claims) <5> <6> # If retrieval mode includes vectors, compute an embedding for the query <7> if has_vector: <8> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <9> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <10> query_vector = embedding["data"][0]["embedding"] <11> else: <12> query_vector = None <13> <14> # Only keep the text query if the retrieval mode uses text, otherwise drop it <15> query_text = q if has_text else "" <16> <17> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <18> if overrides.get("semantic_ranker") and has_text: <19> r = await self.search_client.search( <20> query_text, <21> filter=filter, <22> query_type=QueryType.SEMANTIC, <23> query_language="en-us", <24> query_speller="lexicon", <25> semantic_configuration_name="default", <26> top=top, <27> query_caption="extractive|highlight-false" if use_semantic_captions else None, <28> vector=query_vector, <29> top_k=50 if query_vector else None, <30> vector_fields="</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: # offset: 1 ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: # offset: 2 <s>gpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) return { "data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> str at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 2=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
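The retrieval-mode flags at the top of run() treat a missing override as "use both text and vectors". The same logic isolated as a small helper for clarity; the helper name is an illustration, not project code:

```python
def retrieval_flags(overrides: dict) -> tuple[bool, bool]:
    # None (no override set) enables both text and vector retrieval.
    mode = overrides.get("retrieval_mode")
    has_text = mode in ["text", "hybrid", None]
    has_vector = mode in ["vectors", "hybrid", None]
    return has_text, has_vector


assert retrieval_flags({}) == (True, True)
assert retrieval_flags({"retrieval_mode": "text"}) == (True, False)
assert retrieval_flags({"retrieval_mode": "vectors"}) == (False, True)
```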
scripts.prepdocs/remove_blobs
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<8>:<add> blobs = iter(blob_container.list_blob_names()) <del> blobs = blob_container.list_blob_names()
# module: scripts.prepdocs def remove_blobs(filename): <0> if args.verbose: <1> print(f"Removing blobs for '{filename or '<all>'}'") <2> blob_service = BlobServiceClient( <3> account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds <4> ) <5> blob_container = blob_service.get_container_client(args.container) <6> if blob_container.exists(): <7> if filename is None: <8> blobs = blob_container.list_blob_names() <9> else: <10> prefix = os.path.splitext(os.path.basename(filename))[0] <11> blobs = filter( <12> lambda b: re.match(f"{prefix}-\d+\.pdf", b), <13> blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), <14> ) <15> for b in blobs: <16> if args.verbose: <17> print(f"\tRemoving blob {b}") <18> blob_container.delete_blob(b) <19>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: re match(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] match(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() storage_creds = None storage_creds = azd_credential if args.storagekey is None else args.storagekey ===========changed ref 0=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
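The iter() wrapper in this diff is there to satisfy mypy: the else branch produces a filter iterator, so the if branch must also bind an Iterator, not the SDK's paging object. A toy sketch of the same shape, with plain lists standing in for the Azure SDK; note the raw-string regex, which also avoids the invalid \d escape in the original f-string:

```python
import re
from typing import Iterator, Optional


def select_blobs(names: list[str], prefix: Optional[str]) -> Iterator[str]:
    if prefix is None:
        blobs: Iterator[str] = iter(names)  # unify with the filter branch below
    else:
        # Keep only page blobs like "<prefix>-<n>.pdf".
        blobs = filter(lambda b: re.match(rf"{prefix}-\d+\.pdf", b), names)
    return blobs


print(list(select_blobs(["report-1.pdf", "report-2.pdf", "other.pdf"], "report")))
```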
scripts.prepdocs/get_document_text
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<24>:<add> for table in (form_recognizer_results.tables or []) <del> for table in form_recognizer_results.tables <25>:<add> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 <del> if table.bounding_regions[0].page_number == page_num + 1
# module: scripts.prepdocs def get_document_text(filename): <0> offset = 0 <1> page_map = [] <2> if args.localpdfparser: <3> reader = PdfReader(filename) <4> pages = reader.pages <5> for page_num, p in enumerate(pages): <6> page_text = p.extract_text() <7> page_map.append((page_num, offset, page_text)) <8> offset += len(page_text) <9> else: <10> if args.verbose: <11> print(f"Extracting text from '{filename}' using Azure Form Recognizer") <12> form_recognizer_client = DocumentAnalysisClient( <13> endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", <14> credential=formrecognizer_creds, <15> headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}, <16> ) <17> with open(filename, "rb") as f: <18> poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f) <19> form_recognizer_results = poller.result() <20> <21> for page_num, page in enumerate(form_recognizer_results.pages): <22> tables_on_page = [ <23> table <24> for table in form_recognizer_results.tables <25> if table.bounding_regions[0].page_number == page_num + 1 <26> ] <27> <28> # mark all positions of the table spans in the page <29> page_offset = page.spans[0].offset <30> page_length = page.spans[0].length <31> table_chars = [-1] * page_length <32> for table_id, table in enumerate(tables_on_page): <33> for span in table.spans: <34> # replace all table spans with "table_id" in table_chars array <35> for i in range(span.length): <36> idx = span.offset - page_offset + i <37> </s>
===========below chunk 0=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 table_chars[idx] = table_id # build page text by replacing characters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] elif table_id not in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map ===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() table_to_html(table) formrecognizer_creds: Union[TokenCredential, AzureKeyCredential] = azd_credential ===========changed ref 0=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient( account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds ) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): if filename is None: + blobs = iter(blob_container.list_blob_names()) - blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter( lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), ) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 1=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
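The table-splicing pass above marks every character position covered by a table span, then rebuilds the page text, emitting each table's HTML exactly once. The same idea reduced to plain strings; function and argument names are illustrative:

```python
def splice_tables(content: str, spans: list[tuple[int, int]], html: list[str]) -> str:
    # Mark each position covered by a table span with that table's id.
    table_chars = [-1] * len(content)
    for table_id, (offset, length) in enumerate(spans):
        for i in range(offset, offset + length):
            table_chars[i] = table_id
    # Rebuild: pass plain characters through, emit each table once.
    out, added = "", set()
    for idx, table_id in enumerate(table_chars):
        if table_id == -1:
            out += content[idx]
        elif table_id not in added:
            out += html[table_id]
            added.add(table_id)
    return out


assert splice_tables("abXXef", [(2, 2)], ["<table/>"]) == "ab<table/>ef"
```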
scripts.prepdocs/update_embeddings_in_batch
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<0>:<add> batch_queue: list = [] <del> batch_queue = []
# module: scripts.prepdocs def update_embeddings_in_batch(sections): <0> batch_queue = [] <1> copy_s = [] <2> batch_response = {} <3> token_count = 0 <4> for s in sections: <5> token_count += calculate_tokens_emb_aoai(s["content"]) <6> if ( <7> token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"] <8> and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"] <9> ): <10> batch_queue.append(s) <11> copy_s.append(s) <12> else: <13> emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) <14> if args.verbose: <15> print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") <16> for emb, item in zip(emb_responses, batch_queue): <17> batch_response[item["id"]] = emb <18> batch_queue = [] <19> batch_queue.append(s) <20> token_count = calculate_tokens_emb_aoai(s["content"]) <21> <22> if batch_queue: <23> emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) <24> if args.verbose: <25> print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") <26> for emb, item in zip(emb_responses, batch_queue): <27> batch_response[item["id"]] = emb <28> <29> for s in copy_s: <30> s["embedding"] = batch_response[s["id"]] <31> yield s <32>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} calculate_tokens_emb_aoai(input: str) compute_embedding_in_batch(texts) ===========changed ref 0=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient( account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds ) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): if filename is None: + blobs = iter(blob_container.list_blob_names()) - blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter( lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), ) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 1=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 2=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient( endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}, ) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [ table + for table in (form_recognizer_results.tables or []) - for table in form_recognizer_results.tables + if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 - if table.bounding_regions[0].page_number == page_num + 1 ] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1] * page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx</s> ===========changed ref 3=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >= 0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing characters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] elif table_id not in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map
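The batching policy in update_embeddings_in_batch is greedy: accumulate sections until the token or batch-size limit would be exceeded, flush, then start a new batch with the current item. A simplified generic sketch of that policy, with word count standing in for calculate_tokens_emb_aoai:

```python
def batches(texts: list[str], token_limit: int, max_batch_size: int):
    batch: list[str] = []
    tokens = 0
    for t in texts:
        cost = len(t.split())  # stand-in for a real token counter
        if batch and (tokens + cost > token_limit or len(batch) >= max_batch_size):
            yield batch  # flush the full batch, then start a new one
            batch, tokens = [], 0
        batch.append(t)
        tokens += cost
    if batch:
        yield batch  # flush the remainder


print(list(batches(["a b", "c d e", "f"], token_limit=4, max_batch_size=16)))
# [['a b'], ['c d e', 'f']]
```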
scripts.prepdocs/remove_from_index
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<10>:<add> removed_docs = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) <del> r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) <12>:<add> print(f"\tRemoved {len(removed_docs)} sections from index") <del> print(f"\tRemoved {len(r)} sections from index")
# module: scripts.prepdocs def remove_from_index(filename): <0> if args.verbose: <1> print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") <2> search_client = SearchClient( <3> endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds <4> ) <5> while True: <6> filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" <7> r = search_client.search("", filter=filter, top=1000, include_total_count=True) <8> if r.get_count() == 0: <9> break <10> r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) <11> if args.verbose: <12> print(f"\tRemoved {len(r)} sections from index") <13> # It can take a few seconds for search results to reflect changes, so wait a bit <14> time.sleep(2) <15>
===========unchanged ref 0=========== at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() search_creds: Union[TokenCredential, AzureKeyCredential] = azd_credential at: scripts.prepdocs.index_sections results = search_client.upload_documents(documents=batch) succeeded = sum([1 for r in results if r.succeeded]) ===========changed ref 0=========== # module: scripts.prepdocs def update_embeddings_in_batch(sections): + batch_queue: list = [] - batch_queue = [] copy_s = [] batch_response = {} token_count = 0 for s in sections: token_count += calculate_tokens_emb_aoai(s["content"]) if ( token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"] ): batch_queue.append(s) copy_s.append(s) else: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb batch_queue = [] batch_queue.append(s) token_count = calculate_tokens_emb_aoai(s["content"]) if batch_queue: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb for s in copy_s: s["embedding"] = batch_response[s["id"]] yield s ===========changed ref 1=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient( account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds ) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): if filename is None: + blobs = iter(blob_container.list_blob_names()) - blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter( lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), ) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 2=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 3=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient( endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}, ) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [ table + for table in (form_recognizer_results.tables or []) - for table in form_recognizer_results.tables + if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 - if table.bounding_regions[0].page_number == page_num + 1 ] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1] * page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx</s> ===========changed ref 4=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >= 0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing characters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] elif table_id not in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map
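The r -> removed_docs rename plausibly matters to mypy, which infers a variable's type from its first assignment and rejects re-binding it to an incompatible type (here, search results versus delete results); distinct names keep each binding precisely typed. A toy reproduction with stub functions in place of the Azure SDK:

```python
def search(q: str) -> list[dict]:
    # Stub standing in for search_client.search(...)
    return [{"id": "1"}]


def delete_documents(docs: list[dict]) -> list[bool]:
    # Stub standing in for search_client.delete_documents(...), which
    # returns a different type than search() does.
    return [True for _ in docs]


r = search("")
removed_docs = delete_documents([{"id": d["id"]} for d in r])  # new name, new type
print(f"Removed {len(removed_docs)} sections from index")
```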
scripts.prepdocs/read_adls_gen2_files
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<17>:<add> acls: Optional[dict[str, list]] = None <del> acls = None
# module: scripts.prepdocs def read_adls_gen2_files( + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: Optional[str] = None, + embedding_model: Optional[str] = None, - use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None, embedding_model: str = None ): <0> datalake_service = DataLakeServiceClient( <1> account_url=f"https://{args.datalakestorageaccount}.dfs.core.windows.net", credential=adls_gen2_creds <2> ) <3> filesystem_client = datalake_service.get_file_system_client(file_system=args.datalakefilesystem) <4> paths = filesystem_client.get_paths(path=args.datalakepath, recursive=True) <5> for path in paths: <6> if not path.is_directory: <7> if args.remove: <8> remove_blobs(path.name) <9> remove_from_index(path.name) <10> else: <11> temp_file_path = os.path.join(tempfile.gettempdir(), os.path.basename(path.name)) <12> try: <13> temp_file = open(temp_file_path, "wb") <14> file_client = filesystem_client.get_file_client(path) <15> file_client.download_file().readinto(temp_file) <16> <17> acls = None <18> if args.useacls: <19> # Parse out user ids and group ids <20> acls = {"oids": [], "groups": []} <21> # https://learn.microsoft.com/python/api/azure-storage-file-datalake/azure.storage.filedatalake.datalakefileclient?view=azure-python#azure-storage-filedatalake-datalakefileclient-get-access-control <22> # Request ACLs as GUIDs <23> acl_list = file_client.get_access_control(upn=False)["acl"] <24> </s>
===========below chunk 0=========== # module: scripts.prepdocs def read_adls_gen2_files( + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: Optional[str] = None, + embedding_model: Optional[str] = None, - use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None, embedding_model: str = None ): # offset: 1 # ACL Format: user::rwx,group::r-x,other::r--,user:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:r-- acl_list = acl_list.split(",") for acl in acl_list: acl_parts = acl.split(":") if len(acl_parts) != 3: continue if len(acl_parts[1]) == 0: continue if acl_parts[0] == "user" and "r" in acl_parts[2]: acls["oids"].append(acl_parts[1]) if acl_parts[0] == "group" and "r" in acl_parts[2]: acls["groups"].append(acl_parts[1]) if not args.skipblobs: upload_blobs(temp_file.name) page_map = get_document_text(temp_file.name) sections = create_sections( os.path.basename(path.name), page_map, use_vectors and not vectors_batch_support, embedding_deployment, embedding_model, ) if use_vectors and vectors_batch_support: sections = update_embeddings_in_batch(sections) index_sections(os.path.basename(path.name), sections, acls) except Exception as e: print(f"\tGot an error while reading {path.name} -> {e} --> skipping file") finally: try: temp_file.close() os.remove(temp_file_path) except Exception as e: print(f"\tGot an error</s> ===========below chunk 1=========== # module: scripts.prepdocs def read_adls_gen2_files( + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: Optional[str] = None, + embedding_model: Optional[str] = None, - use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None, embedding_model: str = None ): # offset: 2 <s>() os.remove(temp_file_path) except Exception as e: print(f"\tGot an error while deleting {temp_file_path} -> {e}") ===========unchanged ref 0=========== at: io.FileIO mode: str name: Union[int, str] # type: ignore at: os.path join(a: StrPath, *paths: StrPath) -> str join(a: BytesPath, *paths: BytesPath) -> bytes basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() adls_gen2_creds = None adls_gen2_creds = azd_credential if args.datalakekey is None else AzureKeyCredential(args.datalakekey) upload_blobs(filename) remove_blobs(filename) get_document_text(filename) create_sections(filename, page_map, use_vectors, embedding_deployment: Optional[str]=None, embedding_model: Optional[str]=None) update_embeddings_in_batch(sections) index_sections(filename, sections, acls=None) remove_from_index(filename) at: tempfile gettempdir() -> str at: typing.IO __slots__ = () ===========changed ref 0=========== # module: scripts.prepdocs def remove_from_index(filename): if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") search_client = SearchClient( endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds ) while True: filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" r = search_client.search("", filter=filter, top=1000, include_total_count=True) if r.get_count() == 0: break + removed_docs = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) - r = 
search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) if args.verbose: + print(f"\tRemoved {len(removed_docs)} sections from index") - print(f"\tRemoved {len(r)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit time.sleep(2) ===========changed ref 1=========== # module: scripts.prepdocs def update_embeddings_in_batch(sections): + batch_queue: list = [] - batch_queue = [] copy_s = [] batch_response = {} token_count = 0 for s in sections: token_count += calculate_tokens_emb_aoai(s["content"]) if ( token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"] ): batch_queue.append(s) copy_s.append(s) else: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb batch_queue = [] batch_queue.append(s) token_count = calculate_tokens_emb_aoai(s["content"]) if batch_queue: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb for s in copy_s: s["embedding"] = batch_response[s["id"]] yield s
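The ACL handling above walks each POSIX-style entry (scope:qualifier:permissions) and keeps only named users and groups that have read access. A minimal standalone sketch of that parsing step, assuming the ACL string format that get_access_control(upn=False) returns (the sample GUID below is made up):

def parse_acl(acl_string: str) -> dict[str, list[str]]:
    # Mirrors the loop in read_adls_gen2_files: split into entries, then into
    # scope:qualifier:permissions, skipping default entries with no qualifier.
    acls: dict[str, list[str]] = {"oids": [], "groups": []}
    for acl in acl_string.split(","):
        acl_parts = acl.split(":")
        if len(acl_parts) != 3 or len(acl_parts[1]) == 0:
            continue
        scope, qualifier, perms = acl_parts
        if scope == "user" and "r" in perms:
            acls["oids"].append(qualifier)
        elif scope == "group" and "r" in perms:
            acls["groups"].append(qualifier)
    return acls

# Hypothetical example:
# parse_acl("user::rwx,group::r-x,other::r--,user:11111111-2222-3333-4444-555555555555:r--")
# -> {"oids": ["11111111-2222-3333-4444-555555555555"], "groups": []}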
scripts.adlsgen2setup/AdlsGen2Setup.__init__
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<22>:<add> self.graph_headers: Optional[dict[str, str]] = None <del> self.graph_headers = None
# module: scripts.adlsgen2setup class AdlsGen2Setup: def __init__( self, data_directory: str, storage_account_name: str, filesystem_name: str, security_enabled_groups: bool, + data_access_control_format: dict[str, Any], - data_access_control_format: dict[str, any], credentials: AsyncTokenCredential, ): <0> """ <1> Initializes the command <2> <3> Parameters <4> ---------- <5> data_directory <6> Directory where sample files are located <7> storage_account_name <8> Name of the Data Lake Storage Gen 2 account to use <9> filesystem_name <10> Name of the container / filesystem in the Data Lake Storage Gen 2 account to use <11> security_enabled_groups <12> When creating groups in Azure AD, whether or not to make them security enabled <13> data_access_control_format <14> File describing how to create groups, upload files with access control. See the sampleacls.json for the format of this file <15> """ <16> self.data_directory = data_directory <17> self.storage_account_name = storage_account_name <18> self.filesystem_name = filesystem_name <19> self.credentials = credentials <20> self.security_enabled_groups = security_enabled_groups <21> self.data_access_control_format = data_access_control_format <22> self.graph_headers = None <23>
===========changed ref 0=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 1=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient( account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds ) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): if filename is None: + blobs = iter(blob_container.list_blob_names()) - blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter( lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), ) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 2=========== # module: scripts.prepdocs def remove_from_index(filename): if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") search_client = SearchClient( endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds ) while True: filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" r = search_client.search("", filter=filter, top=1000, include_total_count=True) if r.get_count() == 0: break + removed_docs = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) - r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) if args.verbose: + print(f"\tRemoved {len(removed_docs)} sections from index") - print(f"\tRemoved {len(r)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit time.sleep(2) ===========changed ref 3=========== # module: scripts.prepdocs def update_embeddings_in_batch(sections): + batch_queue: list = [] - batch_queue = [] copy_s = [] batch_response = {} token_count = 0 for s in sections: token_count += calculate_tokens_emb_aoai(s["content"]) if ( token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"] ): batch_queue.append(s) copy_s.append(s) else: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb batch_queue = [] batch_queue.append(s) token_count = calculate_tokens_emb_aoai(s["content"]) if batch_queue: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. 
Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb for s in copy_s: s["embedding"] = batch_response[s["id"]] yield s ===========changed ref 4=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient( endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}, ) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [ table + for table in (form_recognizer_results.tables or []) - for table in form_recognizer_results.tables + if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 - if table.bounding_regions[0].page_number == page_num + 1 ] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1] * page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx</s>
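The annotation fixes in this commit follow one recurring mypy rule: a parameter or attribute that can be None needs an explicit Optional[...] type. A small sketch of the before/after pattern, with illustrative names rather than the repo's full signatures:

from typing import Any, Optional

# Before: `embedding_deployment: str = None` fails mypy, because None is not a str.
# After: spell out the Optional default.
def read_files(use_vectors: bool, embedding_deployment: Optional[str] = None) -> None:
    ...

class Setup:
    def __init__(self, data_access_control_format: dict[str, Any]):
        # Attributes that start out as None get an explicit annotation so that
        # later non-None assignments still type-check.
        self.graph_headers: Optional[dict[str, str]] = None
        self.data_access_control_format = data_access_control_format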
scripts.manageacl/main
Modified
Azure-Samples~azure-search-openai-demo
25a2e0ed3c165d6da22a25dbfe149d1dd323fdbb
Fixed type annotations per mypy and added mypy to CICD (#749)
<6>:<add> search_credential: Union[AsyncTokenCredential, AzureKeyCredential] = azd_credential <add> if args.search_key is not None: <add> search_credential = AzureKeyCredential(args.search_key) <del> search_credential = azd_credential if args.search_key is None else AzureKeyCredential(args.search_key)
# module: scripts.manageacl + def main(args: Any): - def main(args: any): <0> # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them <1> azd_credential = ( <2> AzureDeveloperCliCredential() <3> if args.tenant_id is None <4> else AzureDeveloperCliCredential(tenant_id=args.tenant_id, process_timeout=60) <5> ) <6> search_credential = azd_credential if args.search_key is None else AzureKeyCredential(args.search_key) <7> <8> command = ManageAcl( <9> service_name=args.search_service, <10> index_name=args.index, <11> document=args.document, <12> acl_action=args.acl_action, <13> acl_type=args.acl_type, <14> acl=args.acl, <15> credentials=search_credential, <16> ) <17> await command.run() <18>
===========unchanged ref 0=========== at: scripts.manageacl ManageAcl(service_name: str, index_name: str, document: str, acl_action: str, acl_type: str, acl: str, credentials: Union[AsyncTokenCredential, AzureKeyCredential]) ===========changed ref 0=========== # module: scripts.adlsgen2setup class AdlsGen2Setup: def __init__( self, data_directory: str, storage_account_name: str, filesystem_name: str, security_enabled_groups: bool, + data_access_control_format: dict[str, Any], - data_access_control_format: dict[str, any], credentials: AsyncTokenCredential, ): """ Initializes the command Parameters ---------- data_directory Directory where sample files are located storage_account_name Name of the Data Lake Storage Gen 2 account to use filesystem_name Name of the container / filesystem in the Data Lake Storage Gen 2 account to use security_enabled_groups When creating groups in Azure AD, whether or not to make them security enabled data_access_control_format File describing how to create groups, upload files with access control. See the sampleacls.json for the format of this file """ self.data_directory = data_directory self.storage_account_name = storage_account_name self.filesystem_name = filesystem_name self.credentials = credentials self.security_enabled_groups = security_enabled_groups self.data_access_control_format = data_access_control_format + self.graph_headers: Optional[dict[str, str]] = None - self.graph_headers = None ===========changed ref 1=========== # module: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) adls_gen2_creds = None storage_creds = None MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache: dict[str, Any] = {} - open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 2=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient( account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds ) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): if filename is None: + blobs = iter(blob_container.list_blob_names()) - blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter( lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]), ) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 3=========== # module: scripts.prepdocs def remove_from_index(filename): if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") search_client = SearchClient( endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds ) while True: filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" r = search_client.search("", filter=filter, top=1000, include_total_count=True) if r.get_count() == 0: break + 
removed_docs = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) - r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r]) if args.verbose: + print(f"\tRemoved {len(removed_docs)} sections from index") - print(f"\tRemoved {len(r)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit time.sleep(2) ===========changed ref 4=========== # module: scripts.prepdocs def update_embeddings_in_batch(sections): + batch_queue: list = [] - batch_queue = [] copy_s = [] batch_response = {} token_count = 0 for s in sections: token_count += calculate_tokens_emb_aoai(s["content"]) if ( token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"] ): batch_queue.append(s) copy_s.append(s) else: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb batch_queue = [] batch_queue.append(s) token_count = calculate_tokens_emb_aoai(s["content"]) if batch_queue: emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") for emb, item in zip(emb_responses, batch_queue): batch_response[item["id"]] = emb for s in copy_s: s["embedding"] = batch_response[s["id"]] yield s
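The manageacl change swaps a one-line conditional expression for a declared Union plus an if-branch. One reading of why: mypy infers the type of a ternary as the join of its two branch types (often just object for unrelated credential classes), whereas an annotated variable keeps the intended Union. A sketch of the pattern:

from typing import Optional, Union

from azure.core.credentials import AzureKeyCredential
from azure.core.credentials_async import AsyncTokenCredential

def pick_search_credential(
    azd_credential: AsyncTokenCredential, search_key: Optional[str]
) -> Union[AsyncTokenCredential, AzureKeyCredential]:
    # Declare the Union up front, then narrow in a branch; mypy accepts this
    # where it may reject the equivalent conditional expression.
    search_credential: Union[AsyncTokenCredential, AzureKeyCredential] = azd_credential
    if search_key is not None:
        search_credential = AzureKeyCredential(search_key)
    return search_credential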
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
79bfb0f17d9ae676541f6c8c609d763365dcd9c0
Add more Playwright tests to check all routes and buttons (#743)
<26>:<add> answer = "The capital of France is Paris. [Benefit_Options-2.pdf]." <del> answer = "The capital of France is Paris."
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer): <2> self.responses = [ <3> {"object": "chat.completion.chunk", "choices": []}, <4> {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, <5> {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, <6> ] <7> <8> def __aiter__(self): <9> return self <10> <11> async def __anext__(self): <12> if self.responses: <13> return self.responses.pop(0) <14> else: <15> raise StopAsyncIteration <16> <17> async def mock_acreate(*args, **kwargs): <18> if openai.api_type == "openai": <19> assert kwargs.get("deployment_id") is None <20> else: <21> assert kwargs.get("deployment_id") is not None <22> messages = kwargs["messages"] <23> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <24> answer = "capital of France" <25> else: <26> answer = "The capital of France is Paris." <27> if "stream" in kwargs and kwargs["stream"] is True: <28> return AsyncChatCompletionIterator(answer) <29> else: <30> return openai.util.convert_to_openai_object( <31> {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} <32> ) <33> <34> monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) <35>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: openai api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.util convert_to_openai_object(resp, api_key=None, api_version=None, organization=None, engine=None, plain_old_data=False) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
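The AsyncChatCompletionIterator mock works because it implements the async-iterator protocol: __aiter__ returns self and __anext__ pops canned chunks until StopAsyncIteration ends the stream. A self-contained sketch of the same idiom, with made-up chunk contents:

import asyncio

class AsyncChunkIterator:
    """Yields canned chunks, then signals completion via StopAsyncIteration."""

    def __init__(self, chunks: list[str]):
        self.chunks = list(chunks)

    def __aiter__(self):
        return self

    async def __anext__(self) -> str:
        if self.chunks:
            return self.chunks.pop(0)
        raise StopAsyncIteration

async def consume() -> None:
    # `async for` drives __anext__ exactly as the app drives the mocked stream.
    async for chunk in AsyncChunkIterator(["The capital ", "of France ", "is Paris."]):
        print(chunk, end="")

asyncio.run(consume())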
tests.test_app/test_ask_rtr_text
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <add> "context": { <del> "question": "What is the capital of France?", <4>:<add> "overrides": {"retrieval_mode": "text"}, <del> "overrides": {"retrieval_mode": "text"}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): <0> response = await client.post( <1> "/ask", <2> json={ <3> "question": "What is the capital of France?", <4> "overrides": {"retrieval_mode": "text"}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
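The #748 migration replaces the old question/history payloads with chat-style messages plus overrides nested under context, as the updated tests show. A hedged sketch of posting that shape from a plain client; the localhost URL and port are assumptions for a local dev server, not something the tests pin down:

import json
from urllib.request import Request, urlopen

# Illustrative payload in the post-#748 shape: chat-style messages plus
# per-request overrides nested under "context".
payload = {
    "messages": [{"content": "What is the capital of France?", "role": "user"}],
    "context": {"overrides": {"retrieval_mode": "text"}},
}
req = Request(
    "http://localhost:50505/ask",  # assumed local dev URL
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urlopen(req) as resp:
    print(json.loads(resp.read()))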
tests.test_app/test_ask_rtr_text_filter
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<4>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <add> "context": { <del> "question": "What is the capital of France?", <5>:<add> "overrides": { <del> "overrides": { <6>:<add> "retrieval_mode": "text", <del> "retrieval_mode": "text", <7>:<add> "use_oid_security_filter": True, <del> "use_oid_security_filter": True, <8>:<add> "use_groups_security_filter": True, <del> "use_groups_security_filter": True, <9>:<add> "exclude_category": "excluded", <del> "exclude_category": "excluded", <10>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): <0> response = await auth_client.post( <1> "/ask", <2> headers={"Authorization": "Bearer MockToken"}, <3> json={ <4> "question": "What is the capital of France?", <5> "overrides": { <6> "retrieval_mode": "text", <7> "use_oid_security_filter": True, <8> "use_groups_security_filter": True, <9> "exclude_category": "excluded", <10> }, <11> }, <12> ) <13> assert response.status_code == 200 <14> assert ( <15> auth_client.config[app.CONFIG_SEARCH_CLIENT].filter <16> == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" <17> ) <18> result = await response.get_json() <19> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <20>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
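The filter asserted above composes a category exclusion with OID and group membership checks in Azure Cognitive Search OData syntax. A sketch of building that string; the helper name is hypothetical, but the output matches the test's expected filter:

def build_security_filter(exclude_category: str, oids: list[str], groups: list[str]) -> str:
    # search.in takes a comma-separated value list; any() tests each element
    # of the document's oids/groups collection against it.
    oid_list = ", ".join(oids)
    group_list = ", ".join(groups)
    return (
        f"category ne '{exclude_category}' and "
        f"(oids/any(g:search.in(g, '{oid_list}')) or "
        f"groups/any(g:search.in(g, '{group_list}')))"
    )

# build_security_filter("excluded", ["OID_X"], ["GROUP_Y", "GROUP_Z"]) reproduces
# the filter asserted in test_ask_rtr_text_filter above.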
tests.test_app/test_ask_rtr_text_semanticranker
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <add> "context": { <del> "question": "What is the capital of France?", <4>:<add> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <del> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): <0> response = await client.post( <1> "/ask", <2> json={ <3> "question": "What is the capital of France?", <4> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_filter response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
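All of these tests pin the JSON response with the pytest-snapshot fixture: assert_match writes the serialized result beside the test when pytest runs with --snapshot-update, and diffs against the stored file on later runs. A minimal sketch of the same idiom with a hypothetical function under test:

import json

def make_result() -> dict:
    # Stand-in for the app response being snapshotted.
    return {"answer": "Paris", "citations": ["Benefit_Options-2.pdf"]}

def test_make_result(snapshot):
    # pytest-snapshot fixture: compares against the stored result.json,
    # regenerated with `pytest --snapshot-update`.
    snapshot.assert_match(json.dumps(make_result(), indent=4), "result.json")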
tests.test_app/test_ask_rtr_text_semanticcaptions
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <add> "context": { <del> "question": "What is the capital of France?", <4>:<add> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <del> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): <0> response = await client.post( <1> "/ask", <2> json={ <3> "question": "What is the capital of France?", <4> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_semanticranker response = await client.post( "/ask", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
tests.test_app/test_ask_rtr_hybrid
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <add> "context": { <del> "question": "What is the capital of France?", <4>:<add> "overrides": {"retrieval_mode": "hybrid"}, <del> "overrides": {"retrieval_mode": "hybrid"}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): <0> response = await client.post( <1> "/ask", <2> json={ <3> "question": "What is the capital of France?", <4> "overrides": {"retrieval_mode": "hybrid"}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_semanticcaptions response = await client.post( "/ask", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: 
app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
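The run() signature added to Approach in this commit is the single entry point each approach now implements: it returns a full response dict, or an async generator of delta dicts when stream=True. A minimal illustrative sketch shaped like a subclass; the echo behavior is made up, not the repo's retrieval logic:

from typing import Any, AsyncGenerator, Union

class EchoApproach:
    # Mirrors the run() signature added to Approach; echoing the last user
    # message stands in for real retrieval + completion.
    def run(
        self,
        messages: list[dict],
        stream: bool = False,
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
        answer = {"message": {"role": "assistant", "content": messages[-1]["content"]}}
        if not stream:
            return answer

        async def chunked() -> AsyncGenerator[dict[str, Any], None]:
            yield answer  # a real approach would yield incremental deltas

        return chunked()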
tests.test_app/test_chat_text
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text"}, <del> "overrides": {"retrieval_mode": "text"}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "history": [{"user": "What is the capital of France?"}], <4> "overrides": {"retrieval_mode": "text"}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: app.backend.approaches.approach class 
Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
tests.test_app/test_chat_text_filter
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<4>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <5>:<add> "context": { <add> "overrides": { <del> "overrides": { <6>:<add> "retrieval_mode": "text", <del> "retrieval_mode": "text", <7>:<add> "use_oid_security_filter": True, <del> "use_oid_security_filter": True, <8>:<add> "use_groups_security_filter": True, <del> "use_groups_security_filter": True, <9>:<add> "exclude_category": "excluded", <del> "exclude_category": "excluded", <10>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): <0> response = await auth_client.post( <1> "/chat", <2> headers={"Authorization": "Bearer MockToken"}, <3> json={ <4> "history": [{"user": "What is the capital of France?"}], <5> "overrides": { <6> "retrieval_mode": "text", <7> "use_oid_security_filter": True, <8> "use_groups_security_filter": True, <9> "exclude_category": "excluded", <10> }, <11> }, <12> ) <13> assert response.status_code == 200 <14> assert ( <15> auth_client.config[app.CONFIG_SEARCH_CLIENT].filter <16> == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" <17> ) <18> result = await response.get_json() <19> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <20>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_text response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text"}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") 
===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError +
tests.test_app/test_chat_text_semanticranker
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <del> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "history": [{"user": "What is the capital of France?"}], <4> "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_text_filter response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await 
response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_text_semanticcaptions
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <del> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <5>:<add> },
# module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "history": [{"user": "What is the capital of France?"}], <4> "overrides": {"retrieval_mode": "text", "semantic_captions": True}, <5> }, <6> ) <7> assert response.status_code == 200 <8> result = await response.get_json() <9> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await 
response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_prompt_template
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, <del> "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, <5>:<add> },
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_prompt_template(client, snapshot):
<0> response = await client.post(
<1>     "/chat",
<2>     json={
<3>         "history": [{"user": "What is the capital of France?"}],
<4>         "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."},
<5>     },
<6> )
<7> assert response.status_code == 200
<8> result = await response.get_json()
<9> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": 
True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": { - "overrides": { + "retrieval_mode": "text", - "retrieval_mode": "text", + "use_oid_security_filter": True, - "use_oid_security_filter": True, + "use_groups_security_filter": True, - "use_groups_security_filter": True, + "exclude_category": "excluded", - "exclude_category": "excluded", + }, }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
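For readers migrating older clients, a hypothetical helper (not in the repo) that maps the pre-#748 payload shape shown on the `- history`/`- overrides` lines above onto the new `messages`/`context` shape:

def upgrade_chat_payload(old: dict) -> dict:
    """Map a pre-#748 /chat payload onto the new schema (illustrative only)."""
    messages = []
    for turn in old.get("history", []):
        if "user" in turn:
            messages.append({"role": "user", "content": turn["user"]})
        if "bot" in turn:
            messages.append({"role": "assistant", "content": turn["bot"]})
    return {"messages": messages, "context": {"overrides": old.get("overrides", {})}}


assert upgrade_chat_payload(
    {
        "history": [{"user": "What is the capital of France?"}],
        "overrides": {"retrieval_mode": "text"},
    }
) == {
    "messages": [{"role": "user", "content": "What is the capital of France?"}],
    "context": {"overrides": {"retrieval_mode": "text"}},
}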
tests.test_app/test_chat_prompt_template_concat
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, <del> "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, <5>:<add> },
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_prompt_template_concat(client, snapshot):
<0> response = await client.post(
<1>     "/chat",
<2>     json={
<3>         "history": [{"user": "What is the capital of France?"}],
<4>         "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."},
<5>     },
<6> )
<7> assert response.status_code == 200
<8> result = await response.get_json()
<9> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_text_semanticcaptions response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + 
}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
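The two prompt-template tests above exercise a convention in this repo: an override beginning with ">>>" is injected into the built-in system prompt, while any other non-empty override replaces it outright. A hedged sketch of that resolution logic (DEFAULT_TEMPLATE is a stand-in for the real system_message_chat_conversation, and the exact injection details may differ):

from typing import Optional

# Stand-in for ChatReadRetrieveReadApproach.system_message_chat_conversation.
DEFAULT_TEMPLATE = "Assistant helps company employees. {injected_prompt}Answer from sources."

def resolve_system_prompt(override: Optional[str]) -> str:
    if not override:
        return DEFAULT_TEMPLATE.format(injected_prompt="")
    if override.startswith(">>>"):
        # ">>>" means: inject into the default prompt instead of replacing it.
        return DEFAULT_TEMPLATE.format(injected_prompt=override[3:].strip() + "\n")
    return override

print(resolve_system_prompt(">>> Meow like a cat."))  # default prompt + injected line
print(resolve_system_prompt("You are a cat."))        # full replacement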
tests.test_app/test_chat_hybrid
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "hybrid"}, <del> "overrides": {"retrieval_mode": "hybrid"}, <5>:<add> },
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_hybrid(client, snapshot):
<0> response = await client.post(
<1>     "/chat",
<2>     json={
<3>         "history": [{"user": "What is the capital of France?"}],
<4>         "overrides": {"retrieval_mode": "hybrid"},
<5>     },
<6> )
<7> assert response.status_code == 200
<8> result = await response.get_json()
<9> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_prompt_template response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of 
France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_vector
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "vector"}, <del> "overrides": {"retrieval_mode": "vector"}, <5>:<add> },
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_vector(client, snapshot):
<0> response = await client.post(
<1>     "/chat",
<2>     json={
<3>         "history": [{"user": "What is the capital of France?"}],
<4>         "overrides": {"retrieval_mode": "vector"},
<5>     },
<6> )
<7> assert response.status_code == 200
<8> result = await response.get_json()
<9> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_prompt_template_concat response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + 
"overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_stream_text
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<1>:<add> "/chat", <del> "/chat_stream", <3>:<add> "stream": True, <add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <4>:<add> "context": { <add> "overrides": {"retrieval_mode": "text"}, <del> "overrides": {"retrieval_mode": "text"}, <5>:<add> },
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_stream_text(client, snapshot):
<0> response = await client.post(
<1>     "/chat_stream",
<2>     json={
<3>         "history": [{"user": "What is the capital of France?"}],
<4>         "overrides": {"retrieval_mode": "text"},
<5>     },
<6> )
<7> assert response.status_code == 200
<8> result = await response.get_data()
<9> snapshot.assert_match(result, "result.jsonlines")
<10>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "vector"}, - "overrides": {"retrieval_mode": "vector"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app 
@pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
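The `result.jsonlines` snapshot name reflects the wire format of the streamed `/chat` response after this change: newline-delimited JSON, one `chat.completion.chunk` object per line. A small parser sketch:

import json

def parse_jsonlines(raw: bytes) -> list:
    """One chat.completion.chunk object per non-empty line."""
    return [json.loads(line) for line in raw.splitlines() if line.strip()]

# e.g. chunks = parse_jsonlines(await response.get_data())
# chunks[0]["choices"][0]["delta"]["role"] == "assistant"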
tests.test_app/test_chat_stream_text_filter
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<1>:<add> "/chat", <del> "/chat_stream", <4>:<add> "stream": True, <add> "messages": [{"content": "What is the capital of France?", "role": "user"}], <del> "history": [{"user": "What is the capital of France?"}], <5>:<add> "context": { <add> "overrides": { <del> "overrides": { <6>:<add> "retrieval_mode": "text", <del> "retrieval_mode": "text", <7>:<add> "use_oid_security_filter": True, <del> "use_oid_security_filter": True, <8>:<add> "use_groups_security_filter": True, <del> "use_groups_security_filter": True, <9>:<add> "exclude_category": "excluded", <del> "exclude_category": "excluded", <10>:<add> }
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_stream_text_filter(auth_client, snapshot):
<0> response = await auth_client.post(
<1>     "/chat_stream",
<2>     headers={"Authorization": "Bearer MockToken"},
<3>     json={
<4>         "history": [{"user": "What is the capital of France?"}],
<5>         "overrides": {
<6>             "retrieval_mode": "text",
<7>             "use_oid_security_filter": True,
<8>             "use_groups_security_filter": True,
<9>             "exclude_category": "excluded",
<10>        },
<11>    },
<12> )
<13> assert response.status_code == 200
<14> assert (
<15>     auth_client.config[app.CONFIG_SEARCH_CLIENT].filter
<16>     == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))"
<17> )
<18> result = await response.get_data()
<19> snapshot.assert_match(result, "result.jsonlines")
<20>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_vector response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "vector"}, }, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_stream_text(client, snapshot): response = await client.post( + "/chat", - "/chat_stream", json={ + "stream": True, + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "vector"}, - "overrides": {"retrieval_mode": "vector"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": 
"What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, - "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_captions": True}, - "overrides": {"retrieval_mode": "text", "semantic_captions": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, - "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> chat_resp = dict(await chat_coroutine)
<del> chat_resp = await chat_coroutine
<4>:<add> chat_resp["choices"][0]["context"] = extra_info
<del> chat_resp.choices[0]["extra_args"] = extra_info
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_without_streaming(
        self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any]
    ) -> dict[str, Any]:
<0> extra_info, chat_coroutine = await self.run_until_final_call(
<1>     history, overrides, auth_claims, should_stream=False
<2> )
<3> chat_resp = await chat_coroutine
<4> chat_resp.choices[0]["extra_args"] = extra_info
<5> return chat_resp
<6>
===========unchanged ref 0===========
at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"

NO_RESPONSE = "0"

system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""

follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""

===========unchanged ref 1===========
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
You have access to Azure Cognitive Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""

query_prompt_few_shots = [
    {"role": USER, "content": "What are my health plans?"},
    {"role": ASSISTANT, "content": "Show available health plans"},
    {"role": USER, "content": "does my plan cover cardio?"},
    {"role": ASSISTANT, "content": "Health plan cardio coverage"},
]

run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple

at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_until_final_call
extra_info = {
    "data_points": results,
    "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
    + msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
    **chatgpt_args,
    model=self.chatgpt_model,
    messages=messages,
    temperature=overrides.get("temperature") or 0.7,
    max_tokens=1024,
    n=1,
    stream=should_stream,
)

===========changed ref 0===========
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_until_final_call(
        self,
        history: list[dict[str, str]],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        should_stream: bool = False,
    ) -> tuple:
        has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
        has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
        use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
        top = overrides.get("top", 3)
        filter = self.build_filter(overrides, auth_claims)
+       original_user_query = history[-1]["content"]
+       user_query_request = "Generate search query for: " + original_user_query
-       user_query_request = "Generate search query for: " + history[-1]["user"]
        functions = [
            {
                "name": "search_sources",
                "description": "Retrieve sources from the Azure Cognitive Search index",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "search_query": {
                            "type": "string",
                            "description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
                        }
                    },
                    "required": ["search_query"],
                },
            }
        ]
        # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
        messages = self.get_messages_from_history(
            self.query_prompt_template,
            self.chatgpt_model,
            history,
            user_query_request,
            self.query_prompt_few_shots,
            self.chatgpt_token_limit - len(user_query_request),
        )
        chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self</s>

===========changed ref 1===========
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_until_final_call(
        self,
        history: list[dict[str, str]],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        should_stream: bool = False,
    ) -> tuple:
    # offset: 1
    <s>_request),
        )
        chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
        chat_completion = await openai.ChatCompletion.acreate(
            **chatgpt_args,
            model=self.chatgpt_model,
            messages=messages,
            temperature=0.0,
            max_tokens=32,
            n=1,
            functions=functions,
            function_call="auto",
        )
+       query_text = self.get_search_query(chat_completion, original_user_query)
-       query_text = self.get_search_query(chat_completion, history[-1]["user"])
        # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
        # If retrieval mode includes vectors, compute an embedding for the query
        if has_vector:
            embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
            embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
            query_vector = embedding["data"][0]["embedding"]
        else:
            query_vector = None

        # Only keep the text query if the retrieval mode uses text, otherwise drop it
        if not has_text:
            query_text = None

        # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
        if overrides.get("semantic</s>
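The `dict(await chat_coroutine)` change above exists because the openai 0.x SDK returns an `OpenAIObject`; copying it into a plain dict makes it safe to attach the approach's own `context` payload. A distilled sketch with a fake completion standing in for the SDK call:

import asyncio

async def run_without_streaming(chat_coroutine, extra_info: dict) -> dict:
    chat_resp = dict(await chat_coroutine)            # plain dict, safe to extend
    chat_resp["choices"][0]["context"] = extra_info   # the new "context" key
    return chat_resp

async def _fake_completion():
    return {"choices": [{"message": {"content": "Paris [info1.txt]"}}]}

print(asyncio.run(run_without_streaming(_fake_completion(), {"data_points": []})))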
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<4>:<del> "choices": [ <5>:<add> "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], <del> {"delta": {"role": self.ASSISTANT}, "extra_args": extra_info, "finish_reason": None, "index": 0} <6>:<del> ],
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def run_with_streaming(
        self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any]
    ) -> AsyncGenerator[dict, None]:
<0> extra_info, chat_coroutine = await self.run_until_final_call(
<1>     history, overrides, auth_claims, should_stream=True
<2> )
<3> yield {
<4>     "choices": [
<5>         {"delta": {"role": self.ASSISTANT}, "extra_args": extra_info, "finish_reason": None, "index": 0}
<6>     ],
<7>     "object": "chat.completion.chunk",
<8> }
<9>
<10> async for event in await chat_coroutine:
<11>     # "2023-07-01-preview" API version has a bug where first response has empty choices
<12>     if event["choices"]:
<13>         yield event
<14>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_without_streaming chat_resp = dict(await chat_coroutine) at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, ) -> tuple: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top", 3) filter = self.build_filter(overrides, auth_claims) + original_user_query = history[-1]["content"] + user_query_request = "Generate search query for: " + original_user_query - user_query_request = "Generate search query for: " + history[-1]["user"] functions = [ { "name": "search_sources", "description": "Retrieve sources from the Azure Cognitive Search index", "parameters": { "type": "object", "properties": { "search_query": { "type": "string", "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", } }, "required": ["search_query"], }, } ] # STEP 1: Generate an optimized keyword search query based on the chat history and the last question messages = self.get_messages_from_history( self.query_prompt_template, self.chatgpt_model, history, user_query_request, self.query_prompt_few_shots, self.chatgpt_token_limit - len(user_query_request), ) chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, ) -> tuple: # offset: 1 <s>_request), ) chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=0.0, max_tokens=32, n=1, functions=functions, function_call="auto", ) + query_text = self.get_search_query(chat_completion, original_user_query) - query_text = self.get_search_query(chat_completion, history[-1]["user"]) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic</s> ===========changed ref 2=========== # module: 
app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool = False, ) -> tuple: # offset: 2 <s>er") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) follow_up_questions_prompt = ( self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" </s>
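The streaming counterpart, distilled: one synthetic first chunk carries the retrieval context, then SDK events are forwarded, skipping the empty-choices event that the "2023-07-01-preview" Azure API emits first. A sketch of that generator pattern:

async def stream_chat(chat_coroutine, extra_info: dict):
    # Synthetic first chunk: carries the retrieval context before any model tokens.
    yield {
        "choices": [
            {"delta": {"role": "assistant"}, "context": extra_info, "finish_reason": None, "index": 0}
        ],
        "object": "chat.completion.chunk",
    }
    async for event in await chat_coroutine:
        if event["choices"]:  # drop the empty-choices event from "2023-07-01-preview"
            yield event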
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<0>:<add> q = messages[-1]["content"]
<add> overrides = context.get("overrides", {})
<add> auth_claims = context.get("auth_claims", {})
<s>(Approach):
+    def run(
+        self,
+        messages: list[dict],
+        stream: bool = False,  # Stream is not used in this approach
+        session_state: Any = None,
+        context: dict[str, Any] = {},
+    ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
-    def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]:
<0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
<1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
<2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
<3> top = overrides.get("top", 3)
<4> filter = self.build_filter(overrides, auth_claims)
<5>
<6> # If retrieval mode includes vectors, compute an embedding for the query
<7> if has_vector:
<8>     embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
<9>     embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q)
<10>    query_vector = embedding["data"][0]["embedding"]
<11> else:
<12>    query_vector = None
<13>
<14> # Only keep the text query if the retrieval mode uses text, otherwise drop it
<15> query_text = q if has_text else ""
<16>
<17> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
<18> if overrides.get("semantic_ranker") and has_text:
<19>     r = await self.search_client.search(
<20>         query_text,
<21>         filter=filter,
<22>         query_type=QueryType.SEMANTIC,
<23>         query_language="en-us",
<24>         query_speller="lexicon",
<25>         semantic_configuration_name="</s>
===========below chunk 0=========== <s> <add> def run( + self, + messages: list[dict], + stream: bool = False, # Stream is not used in this approach + session_state: Any = None, + context: dict[str, Any] = {}, + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: - def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: # offset: 1 top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant</s> ===========below chunk 1=========== <s> <add> def run( + self, + messages: list[dict], + stream: bool = False, # Stream is not used in this approach + session_state: Any = None, + context: dict[str, Any] = {}, + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: - def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: # offset: 2 <s> model to mimic response and make sure they match rules laid out in system message. 
message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["extra_args"] = extra_info return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 1=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError + ===========changed ref 1=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = 
await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_resp = dict(await chat_coroutine) - chat_resp = await chat_coroutine + chat_resp["choices"][0]["context"] = extra_info - chat_resp.choices[0]["extra_args"] = extra_info return chat_resp
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> context = request_json.get("context", {}) <4>:<add> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <del> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <6>:<add> approach = current_app.config[CONFIG_ASK_APPROACH] <del> impl = current_app.config[CONFIG_ASK_APPROACH] <10>:<add> r = await approach.run(request_json["messages"], context=context) <del> r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims)
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <4> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <5> try: <6> impl = current_app.config[CONFIG_ASK_APPROACH] <7> # Workaround for: https://github.com/openai/openai-python/issues/371 <8> async with aiohttp.ClientSession() as s: <9> openai.aiosession.set(s) <10> r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) <11> return jsonify(r) <12> except Exception as e: <13> logging.exception("Exception in /ask") <14> return jsonify({"error": str(e)}), 500 <15>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError + ===========changed ref 1=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_resp = dict(await chat_coroutine) - chat_resp = await chat_coroutine + chat_resp["choices"][0]["context"] = extra_info - chat_resp.choices[0]["extra_args"] = extra_info return chat_resp ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + overrides = context.get("overrides", {}) + auth_claims = context.get("auth_claims", {}) + if stream is False: + # Workaround for: https://github.com/openai/openai-python/issues/371 + async with aiohttp.ClientSession() as s: + openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims) + return response + else: + return self.run_with_streaming(messages, 
overrides, auth_claims) + ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "vector"}, - "overrides": {"retrieval_mode": "vector"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
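===========example sketch===========
For orientation, a minimal client-side sketch of the reshaped /ask request: the question now travels as an OpenAI-style "messages" list and overrides move under "context", matching the tests in this record. The host and port are hypothetical and the requests package is an assumed dependency.

import requests

response = requests.post(
    "http://localhost:50505/ask",  # hypothetical local dev address
    json={
        "messages": [{"content": "What is the capital of France?", "role": "user"}],
        "context": {"overrides": {"retrieval_mode": "text"}},
    },
)
response.raise_for_status()
# Per the retrievethenread diff, the approach returns an OpenAI-style
# completion, so the answer and retrieval context live on choices[0].
print(response.json()["choices"][0])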
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<3>:<add> context = request_json.get("context", {}) <4>:<add> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <del> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <6>:<add> approach = current_app.config[CONFIG_CHAT_APPROACH] <del> impl = current_app.config[CONFIG_CHAT_APPROACH] <7>:<del> # Workaround for: https://github.com/openai/openai-python/issues/371 <8>:<del> async with aiohttp.ClientSession() as s: <9>:<del> openai.aiosession.set(s) <10>:<del> r = await impl.run_without_streaming( <11>:<del> request_json["history"], request_json.get("overrides", {}), auth_claims <12>:<del> ) <13>:<add> result = await approach.run(request_json["messages"], stream=request_json.get("stream", False), context=context) <add> print(result) <add> print(type(result)) <add> if isinstance(result, dict): <add> return jsonify(result) <del> return jsonify(r) <14>:<add> else: <add> response = await make_response(format_as_ndjson(result)) <add> response.timeout = None # type: ignore <add> return response
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <4> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <5> try: <6> impl = current_app.config[CONFIG_CHAT_APPROACH] <7> # Workaround for: https://github.com/openai/openai-python/issues/371 <8> async with aiohttp.ClientSession() as s: <9> openai.aiosession.set(s) <10> r = await impl.run_without_streaming( <11> request_json["history"], request_json.get("overrides", {}), auth_claims <12> ) <13> return jsonify(r) <14> except Exception as e: <15> logging.exception("Exception in /chat") <16> return jsonify({"error": str(e)}), 500 <17>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() + context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] + context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) - auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) try: + approach = current_app.config[CONFIG_ASK_APPROACH] - impl = current_app.config[CONFIG_ASK_APPROACH] # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + r = await approach.run(request_json["messages"], context=context) - r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 1=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError + ===========changed ref 2=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: 
extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_resp = dict(await chat_coroutine) - chat_resp = await chat_coroutine + chat_resp["choices"][0]["context"] = extra_info - chat_resp.choices[0]["extra_args"] = extra_info return chat_resp ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + overrides = context.get("overrides", {}) + auth_claims = context.get("auth_claims", {}) + if stream is False: + # Workaround for: https://github.com/openai/openai-python/issues/371 + async with aiohttp.ClientSession() as s: + openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims) + return response + else: + return self.run_with_streaming(messages, overrides, auth_claims) + ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
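===========example sketch===========
The new handler serializes the streaming branch with format_as_ndjson. Its body is not shown in this record, so the following is a hedged sketch of what such a formatter could look like, assuming NDJSON in the usual sense of one JSON document per line (the signature matches the app.backend.app refs later in this document; the real implementation may differ).

import json
from typing import AsyncGenerator

async def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # Emit each streamed chunk as a single JSON document followed by a newline.
    async for event in r:
        yield json.dumps(event, ensure_ascii=False) + "\n"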
tests.e2e/test_chat
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<0>:<add> # Set up a mock route to the /chat endpoint with streaming results <del> # Set up a mock route to the /chat_stream endpoint <8>:<add> page.route("*/**/chat", handle) <del> page.route("*/**/chat_stream", handle)
# module: tests.e2e def test_chat(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat_stream endpoint <1> def handle(route: Route): <2> # Read the JSONL from our snapshot results and return as the response <3> f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") <4> jsonl = f.read() <5> f.close() <6> route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) <7> <8> page.route("*/**/chat_stream", handle) <9> <10> # Check initial page state <11> page.goto(live_server_url) <12> expect(page).to_have_title("GPT + Enterprise data | Sample") <13> expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() <14> expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() <15> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <16> <17> # Ask a question and wait for the message to appear <18> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <19> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <20> "Whats the dental plan?" <21> ) <22> page.get_by_role("button", name="Ask question button").click() <23> <24> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <25> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <26> expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() <27> <28> # Show the citation document</s>
===========below chunk 0=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # offset: 1 expect(page.get_by_role("tab", name="Citation")).to_be_visible() expect(page.get_by_title("Citation")).to_be_visible() # Show the thought process page.get_by_label("Show thought process").click() expect(page.get_by_title("Thought process")).to_be_visible() expect(page.get_by_text("Searched for:")).to_be_visible() # Show the supporting content page.get_by_label("Show supporting content").click() expect(page.get_by_title("Supporting content")).to_be_visible() expect(page.get_by_role("heading", name="Benefit_Options-2.pdf")).to_be_visible() # Clear the chat page.get_by_role("button", name="Clear chat").click() expect(page.get_by_text("Whats the dental plan?")).not_to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).not_to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() ===========unchanged ref 0=========== at: io.BufferedReader read(self, size: Optional[int]=..., /) -> bytes at: io.TextIOWrapper close(self) -> None at: typing.IO __slots__ = () close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError + ===========changed ref 1=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_resp = dict(await chat_coroutine) - chat_resp = await chat_coroutine + chat_resp["choices"][0]["context"] = extra_info - chat_resp.choices[0]["extra_args"] = extra_info return chat_resp ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + overrides = context.get("overrides", {}) + auth_claims = context.get("auth_claims", {}) + if stream is False: + # Workaround for: https://github.com/openai/openai-python/issues/371 + async with aiohttp.ClientSession() as s: + openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims) + return response + else: + return self.run_with_streaming(messages, overrides, auth_claims) + ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, 
) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { - "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "hybrid"}, - "overrides": {"retrieval_mode": "hybrid"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "vector"}, - "overrides": {"retrieval_mode": "vector"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], - "history": [{"user": "What is the capital of France?"}], + "context": { + "overrides": {"retrieval_mode": "text"}, - "overrides": {"retrieval_mode": "text"}, + }, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
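===========example sketch===========
As a companion to the mocked stream above, a short sketch of reassembling the snapshot's JSON Lines into the final answer, assuming each line mirrors an OpenAI-style streaming chunk as in the diffs in this document (the first chunk carries only a role delta, so missing "content" keys default to empty).

import json

with open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") as f:
    chunks = [json.loads(line) for line in f if line.strip()]

answer = "".join(
    chunk["choices"][0].get("delta", {}).get("content", "")
    for chunk in chunks
    if chunk.get("choices")
)
print(answer)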
tests.e2e/test_chat_customization
Modified
Azure-Samples~azure-search-openai-demo
3324bdb3ec36163adb88db17c9cbe4dd8c1b0bff
Chat response (#748)
<2>:<add> overrides = route.request.post_data_json["context"]["overrides"] <del> overrides = route.request.post_data_json["overrides"]
# module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint <1> def handle(route: Route): <2> overrides = route.request.post_data_json["overrides"] <3> assert overrides["retrieval_mode"] == "vectors" <4> assert overrides["semantic_ranker"] is False <5> assert overrides["semantic_captions"] is True <6> assert overrides["top"] == 1 <7> assert overrides["prompt_template"] == "You are a cat and only talk about tuna." <8> assert overrides["exclude_category"] == "dogs" <9> assert overrides["suggest_followup_questions"] is True <10> assert overrides["use_oid_security_filter"] is False <11> assert overrides["use_groups_security_filter"] is False <12> <13> # Read the JSON from our snapshot results and return as the response <14> f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") <15> json = f.read() <16> f.close() <17> route.fulfill(body=json, status=200) <18> <19> page.route("*/**/chat", handle) <20> <21> # Check initial page state <22> page.goto(live_server_url) <23> expect(page).to_have_title("GPT + Enterprise data | Sample") <24> <25> # Customize all the settings <26> page.get_by_role("button", name="Developer settings").click() <27> page.get_by_label("Override prompt template").click() <28> page.get_by_label("Override prompt template").fill("You are a cat and only talk about tuna.") <29> page.get_by_label("Retrieve this many search results:").click() <30> page.get_by_label("Retrieve this many search results:").fill("1") <31> page.get_by_label("Exclude category").click() <32> page.get_by_label("Exclude category").fill("dogs") <33> page.get_by_</s>
===========below chunk 0=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # offset: 1 page.get_by_text("Suggest follow-up questions").click() page.get_by_text("Use semantic ranker for retrieval").click() page.get_by_text("Vectors + Text (Hybrid)").click() page.get_by_role("option", name="Vectors", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Ask question button").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========unchanged ref 0=========== at: io.BufferedRandom read(self, size: Optional[int]=..., /) -> bytes at: io.TextIOWrapper close(self) -> None at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): + # Set up a mock route to the /chat endpoint with streaming results - # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSONL from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") jsonl = f.read() f.close() route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) + page.route("*/**/chat", handle) - page.route("*/**/chat_stream", handle) # Check initial page state page.goto(live_server_url) expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Ask question button").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() #</s> ===========changed ref 1=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # offset: 1 <s> expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() # Show the citation document page.get_by_text("1. 
Benefit_Options-2.pdf").click() expect(page.get_by_role("tab", name="Citation")).to_be_visible() expect(page.get_by_title("Citation")).to_be_visible() # Show the thought process page.get_by_label("Show thought process").click() expect(page.get_by_title("Thought process")).to_be_visible() expect(page.get_by_text("Searched for:")).to_be_visible() # Show the supporting content page.get_by_label("Show supporting content").click() expect(page.get_by_title("Supporting content")).to_be_visible() expect(page.get_by_role("heading", name="Benefit_Options-2.pdf")).to_be_visible() # Clear the chat page.get_by_role("button", name="Clear chat").click() expect(page.get_by_text("Whats the dental plan?")).not_to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).not_to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() ===========changed ref 2=========== # module: app.backend.approaches.approach class Approach(ABC): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + raise NotImplementedError + ===========changed ref 3=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_request_must_be_json(client): - response = await client.post("/chat_stream") - assert response.status_code == 415 - result = await response.get_json() - assert result["error"] == "request must be json" - ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) + chat_resp = dict(await chat_coroutine) - chat_resp = await chat_coroutine + chat_resp["choices"][0]["context"] = extra_info - chat_resp.choices[0]["extra_args"] = extra_info return chat_resp ===========changed ref 5=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def run( + self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} + ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: + overrides = context.get("overrides", {}) + auth_claims = context.get("auth_claims", {}) + if stream is False: + # Workaround for: https://github.com/openai/openai-python/issues/371 + async with aiohttp.ClientSession() as s: + openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims) + return response + else: + return self.run_with_streaming(messages, overrides, auth_claims) +
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
ff273c2636e6035eb1f96da22bb9f47e7e353c66
Rm print statements (#772)
<9>:<del> print(result) <10>:<del> print(type(result))
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_CHAT_APPROACH] <8> result = await approach.run(request_json["messages"], stream=request_json.get("stream", False), context=context) <9> print(result) <10> print(type(result)) <11> if isinstance(result, dict): <12> return jsonify(result) <13> else: <14> response = await make_response(format_as_ndjson(result)) <15> response.timeout = None # type: ignore <16> return response <17> except Exception as e: <18> logging.exception("Exception in /chat") <19> return jsonify({"error": str(e)}), 500 <20>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None] at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<5>:<add> chat_resp["choices"][0]["session_state"] = session_state
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=False <2> ) <3> chat_resp = dict(await chat_coroutine) <4> chat_resp["choices"][0]["context"] = extra_info <5> return chat_resp
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<4>:<add> "choices": [ <add> { <add> "delta": {"role": self.ASSISTANT}, <add> "context": extra_info, <add> "session_state": session_state, <add> "finish_reason": None, <add> "index": 0, <add> } <add> ], <del> "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}],
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=True <2> ) <3> yield { <4> "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], <5> "object": "chat.completion.chunk", <6> } <7> <8> async for event in await chat_coroutine: <9> # "2023-07-01-preview" API version has a bug where first response has empty choices <10> if event["choices"]: <11> yield event
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_without_streaming extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
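===========example sketch===========
A hedged consumer sketch for the streaming shape above: the first yielded chunk carries "context" and "session_state" on choices[0], later chunks carry content deltas. The shapes are read off the diff; the function name is illustrative.

from typing import Any, AsyncGenerator

async def collect(stream: AsyncGenerator[dict, None]) -> tuple[str, Any, Any]:
    context = session_state = None
    parts: list[str] = []
    async for event in stream:
        if not event.get("choices"):
            continue  # the "2023-07-01-preview" API can emit empty choices
        choice = event["choices"][0]
        if "context" in choice:
            context = choice["context"]
            session_state = choice.get("session_state")
        parts.append(choice.get("delta", {}).get("content", ""))
    return "".join(parts), context, session_state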
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<6>:<add> response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) <del> response = await self.run_without_streaming(messages, overrides, auth_claims) <9>:<add> return self.run_with_streaming(messages, overrides, auth_claims, session_state) <del> return self.run_with_streaming(messages, overrides, auth_claims)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> overrides = context.get("overrides", {}) <1> auth_claims = context.get("auth_claims", {}) <2> if stream is False: <3> # Workaround for: https://github.com/openai/openai-python/issues/371 <4> async with aiohttp.ClientSession() as s: <5> openai.aiosession.set(s) <6> response = await self.run_without_streaming(messages, overrides, auth_claims) <7> return response <8> else: <9> return self.run_with_streaming(messages, overrides, auth_claims) <10>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_with_streaming extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) ===========changed ref 0=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== <s>.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { + "choices": [ + { + "delta": {"role": self.ASSISTANT}, + "context": extra_info, + "session_state": session_state, + "finish_reason": None, + "index": 0, + } + ], - "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], "object": "chat.completion.chunk", } async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices if event["choices"]: yield event ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": 
False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <6> top = overrides.get("top", 3) <7> filter = self.build_filter(overrides, auth_claims) <8> <9> # If retrieval mode includes vectors, compute an embedding for the query <10> if has_vector: <11> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <12> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <13> query_vector = embedding["data"][0]["embedding"] <14> else: <15> query_vector = None <16> <17> # Only keep the text query if the retrieval mode uses text, otherwise drop it <18> query_text = q if has_text else "" <19> <20> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <21> if overrides.get("semantic_ranker") and has_text: <22> r = await self.search_client.search( <23> query_text, <24> filter=filter, <25> query_type=QueryType.SEMANTIC, <26> query_language="en-us",</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["context"] = extra_info return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. 
In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params)
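===========example sketch===========
The results formatting above leans on a nonewlines helper whose signature appears in the refs but whose body does not, so this is a hedged guess at its behavior from the name and call sites (the real implementation may differ).

def nonewlines(s: str) -> str:
    # Keep each retrieved snippet on a single line when building the prompt.
    return s.replace("\n", " ").replace("\r", " ")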
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<11>:<add> r = await approach.run( <add> request_json["messages"], context=context, session_state=request_json.get("session_state") <add> ) <del> r = await approach.run(request_json["messages"], context=context)
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_ASK_APPROACH] <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await approach.run(request_json["messages"], context=context) <12> return jsonify(r) <13> except Exception as e: <14> logging.exception("Exception in /ask") <15> return jsonify({"error": str(e)}), 500 <16>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + 
json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) if stream is False: # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) - response = await self.run_without_streaming(messages, overrides, auth_claims) return response else: + return self.run_with_streaming(messages, overrides, auth_claims, session_state) - return self.run_with_streaming(messages, overrides, auth_claims) ===========changed ref 5=========== <s>.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { + "choices": [ + { + "delta": {"role": self.ASSISTANT}, + "context": extra_info, + "session_state": session_state, + "finish_reason": None, + "index": 0, + } + ], - "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], "object": "chat.completion.chunk", } async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices if event["choices"]: yield event
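===========example sketch===========
The changed refs above show /ask and /chat now accepting a top-level session_state and echoing it back in the response. A minimal client sketch of that round trip (the localhost URL, the port, and the `requests` dependency are illustrative assumptions, not part of the repo's tests):

import requests

resp = requests.post(
    "http://localhost:50505/ask",  # assumed local dev address
    json={
        "messages": [{"content": "What is the capital of France?", "role": "user"}],
        "context": {"overrides": {"retrieval_mode": "text"}},
        "session_state": {"conversation_id": 1234},
    },
)
resp.raise_for_status()
body = resp.json()
# The backend copies session_state onto the first choice unchanged, so a
# stateless client can round-trip its conversation identifier.
assert body["choices"][0]["session_state"] == {"conversation_id": 1234}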
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<8>:<add> result = await approach.run( <add> request_json["messages"], <add> stream=request_json.get("stream", False), <add> context=context, <add> session_state=request_json.get("session_state"), <add> ) <del> result = await approach.run(request_json["messages"], stream=request_json.get("stream", False), context=context)
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_CHAT_APPROACH] <8> result = await approach.run(request_json["messages"], stream=request_json.get("stream", False), context=context) <9> if isinstance(result, dict): <10> return jsonify(result) <11> else: <12> response = await make_response(format_as_ndjson(result)) <13> response.timeout = None # type: ignore <14> return response <15> except Exception as e: <16> logging.exception("Exception in /chat") <17> return jsonify({"error": str(e)}), 500 <18>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None] at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: approach = current_app.config[CONFIG_ASK_APPROACH] # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + r = await approach.run( + request_json["messages"], context=context, session_state=request_json.get("session_state") + ) - r = await approach.run(request_json["messages"], context=context) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 1=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = 
await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 5=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) if stream is False: # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) - response = await self.run_without_streaming(messages, overrides, auth_claims) return response else: + return self.run_with_streaming(messages, overrides, auth_claims, session_state) - return self.run_with_streaming(messages, overrides, auth_claims) ===========changed ref 6=========== <s>.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { + "choices": [ + { + "delta": {"role": self.ASSISTANT}, + "context": extra_info, + "session_state": session_state, + "finish_reason": None, + "index": 0, + } + ], - "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], "object": "chat.completion.chunk", } async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices if event["choices"]: yield event
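===========example sketch===========
The diff above threads session_state from the route handler into the approach. On the non-streaming path the pass-through reduces to attaching two keys to the first choice; a minimal sketch of that pattern (the helper name is hypothetical, the repo inlines these assignments in run_without_streaming):

from typing import Any

def attach_response_extras(chat_resp: dict, extra_info: Any, session_state: Any = None) -> dict:
    # Mirror of the two assignments added by this commit: "context" carries
    # the retrieval details, "session_state" is echoed back verbatim.
    chat_resp["choices"][0]["context"] = extra_info
    chat_resp["choices"][0]["session_state"] = session_state
    return chat_resp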
tests.e2e/test_chat
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<2>:<add> # Assert that session_state is specified in the request (None for now) <add> session_state = route.request.post_data_json["session_state"] <add> assert session_state is None
# module: tests.e2e def test_chat(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint with streaming results <1> def handle(route: Route): <2> # Read the JSONL from our snapshot results and return as the response <3> f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") <4> jsonl = f.read() <5> f.close() <6> route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) <7> <8> page.route("*/**/chat", handle) <9> <10> # Check initial page state <11> page.goto(live_server_url) <12> expect(page).to_have_title("GPT + Enterprise data | Sample") <13> expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() <14> expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() <15> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <16> <17> # Ask a question and wait for the message to appear <18> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <19> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <20> "Whats the dental plan?" <21> ) <22> page.get_by_role("button", name="Ask question button").click() <23> <24> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <25> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <26> expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() <27> <28> # Show the citation document </s>
===========below chunk 0=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # offset: 1 expect(page.get_by_role("tab", name="Citation")).to_be_visible() expect(page.get_by_title("Citation")).to_be_visible() # Show the thought process page.get_by_label("Show thought process").click() expect(page.get_by_title("Thought process")).to_be_visible() expect(page.get_by_text("Searched for:")).to_be_visible() # Show the supporting content page.get_by_label("Show supporting content").click() expect(page.get_by_title("Supporting content")).to_be_visible() expect(page.get_by_role("heading", name="Benefit_Options-2.pdf")).to_be_visible() # Clear the chat page.get_by_role("button", name="Clear chat").click() expect(page.get_by_text("Whats the dental plan?")).not_to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).not_to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() ===========unchanged ref 0=========== at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: io.TextIOWrapper close(self) -> None at: typing.IO __slots__ = () close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 4=========== # module: 
app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {} ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) if stream is False: # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) - response = await self.run_without_streaming(messages, overrides, auth_claims) return response else: + return self.run_with_streaming(messages, overrides, auth_claims, session_state) - return self.run_with_streaming(messages, overrides, auth_claims) ===========changed ref 5=========== <s>.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> AsyncGenerator[dict, None]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=True ) yield { + "choices": [ + { + "delta": {"role": self.ASSISTANT}, + "context": extra_info, + "session_state": session_state, + "finish_reason": None, + "index": 0, + } + ], - "choices": [{"delta": {"role": self.ASSISTANT}, "context": extra_info, "finish_reason": None, "index": 0}], "object": "chat.completion.chunk", } async for event in await chat_coroutine: # "2023-07-01-preview" API version has a bug where first response has empty choices if event["choices"]: yield event
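===========example sketch===========
In the streaming path above, the first NDJSON chunk carries context and session_state while later chunks carry content deltas. A hedged consumer sketch (the aiohttp usage and the URL are assumptions for illustration; the repo's own tests read the whole body through Quart's test client instead):

import asyncio
import json

import aiohttp

async def read_chat_stream(url: str, payload: dict) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as resp:
            async for raw_line in resp.content:  # one JSON object per line
                line = raw_line.decode("utf-8").strip()
                if not line:
                    continue
                event = json.loads(line)
                for choice in event.get("choices", []):
                    if "session_state" in choice:  # present in the first chunk
                        print("session_state:", choice["session_state"])

# Example invocation (illustrative):
# asyncio.run(read_chat_stream("http://localhost:50505/chat",
#     {"messages": [{"content": "hi", "role": "user"}], "stream": True}))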
tests.e2e/test_ask
Modified
Azure-Samples~azure-search-openai-demo
7c614bbe7470ed5cf8cba524943de09aea29a604
Adding session_state to conform to ChatApp protocol (#773)
<2>:<add> # Assert that session_state is specified in the request (None for now) <add> session_state = route.request.post_data_json["session_state"] <add> assert session_state is None
# module: tests.e2e def test_ask(page: Page, live_server_url: str): <0> # Set up a mock route to the /ask endpoint <1> def handle(route: Route): <2> # Read the JSON from our snapshot results and return as the response <3> f = open("tests/snapshots/test_app/test_ask_rtr_hybrid/client0/result.json") <4> json = f.read() <5> f.close() <6> route.fulfill(body=json, status=200) <7> <8> page.route("*/**/ask", handle) <9> page.goto(live_server_url) <10> expect(page).to_have_title("GPT + Enterprise data | Sample") <11> <12> page.get_by_role("link", name="Ask a question").click() <13> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() <14> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").fill("Whats the dental plan?") <15> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() <16> page.get_by_label("Ask question button").click() <17> <18> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <19> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <20>
===========unchanged ref 0=========== at: io.FileIO close(self) -> None read(self, size: int=..., /) -> bytes at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # Set up a mock route to the /chat endpoint with streaming results def handle(route: Route): + # Assert that session_state is specified in the request (None for now) + session_state = route.request.post_data_json["session_state"] + assert session_state is None # Read the JSONL from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") jsonl = f.read() f.close() route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Ask question button").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name</s> ===========changed ref 1=========== # module: tests.e2e def test_chat(page: Page, live_server_url: str): # offset: 1 <s> of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() # Show the citation document page.get_by_text("1. 
Benefit_Options-2.pdf").click() expect(page.get_by_role("tab", name="Citation")).to_be_visible() expect(page.get_by_title("Citation")).to_be_visible() # Show the thought process page.get_by_label("Show thought process").click() expect(page.get_by_title("Thought process")).to_be_visible() expect(page.get_by_text("Searched for:")).to_be_visible() # Show the supporting content page.get_by_label("Show supporting content").click() expect(page.get_by_title("Supporting content")).to_be_visible() expect(page.get_by_role("heading", name="Benefit_Options-2.pdf")).to_be_visible() # Clear the chat page.get_by_role("button", name="Clear chat").click() expect(page.get_by_text("Whats the dental plan?")).not_to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).not_to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() ===========changed ref 2=========== <s>es.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + session_state: Any = None, - self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": True, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_session_state_persists(client, snapshot): + response = await client.post( + "/ask", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_session_state_persists(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + "stream": False, + "session_state": {"conversation_id": 1234}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
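===========example sketch===========
Both e2e tests above pair an assertion on the outgoing request body with a canned snapshot response. That pattern generalizes to a small factory; a sketch (the helper name and default arguments are illustrative, not repo code):

from playwright.sync_api import Route

def make_snapshot_handler(snapshot_path: str, expected_session_state=None):
    def handle(route: Route) -> None:
        # post_data_json parses the intercepted request body as JSON.
        assert route.request.post_data_json["session_state"] == expected_session_state
        with open(snapshot_path) as f:
            route.fulfill(body=f.read(), status=200)
    return handle

# page.route("*/**/ask", make_snapshot_handler("tests/snapshots/.../result.json"))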
scripts.prepdocs/create_search_index
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<7>:<add> SearchableField(name="content", type="Edm.String", analyzer_name=args.searchanalyzername), <del> SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"),
# module: scripts.prepdocs def create_search_index(): <0> if args.verbose: <1> print(f"Ensuring search index {args.index} exists") <2> index_client = SearchIndexClient( <3> endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds <4> ) <5> fields = [ <6> SimpleField(name="id", type="Edm.String", key=True), <7> SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), <8> SearchField( <9> name="embedding", <10> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <11> hidden=False, <12> searchable=True, <13> filterable=False, <14> sortable=False, <15> facetable=False, <16> vector_search_dimensions=1536, <17> vector_search_configuration="default", <18> ), <19> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <20> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), <21> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), <22> ] <23> if args.useacls: <24> fields.append( <25> SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) <26> ) <27> fields.append( <28> SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) <29> ) <30> <31> if args.index not in index_client.list_index_names(): <32> index = SearchIndex( <33> name=args.index, <34> fields=fields, <35> semantic_settings=SemanticSettings( <36> configurations=[ <37> SemanticConfiguration( <38> name="default", <39> prioritized_fields=PrioritizedFields( <40> title_field=None, prioritized_content_</s>
===========below chunk 0=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() search_creds: Union[TokenCredential, AzureKeyCredential] = azd_credential
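===========example sketch===========
With this commit the analyzer for the content field comes from --searchanalyzername instead of being hardcoded to "en.microsoft". A sketch of choosing an analyzer per deployment language (the mapping below is an assumption for illustration; consult the Azure Cognitive Search docs for the full list of language analyzers):

ANALYZERS = {
    "en": "en.microsoft",  # the script's default
    "fr": "fr.microsoft",
    "es": "es.microsoft",
    "ja": "ja.microsoft",
}

def analyzer_for(language_code: str) -> str:
    # Fall back to the language-agnostic Lucene analyzer when unmapped.
    return ANALYZERS.get(language_code, "standard.lucene")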
tests.test_chatapproach/test_get_search_query
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<0>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_search_query(): <0> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <1> <2> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <3> default_query = "hello" <4> query = chat_approach.get_search_query(json.loads(payload), default_query) <5> <6> assert query == "accesstelemedicineservices" <7>
===========unchanged ref 0=========== at: json loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any ===========changed ref 0=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient( endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds ) fields = [ SimpleField(name="id", type="Edm.String", key=True), + SearchableField(name="content", type="Edm.String", analyzer_name=args.searchanalyzername), - SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_configuration="default", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), ] if args.useacls: fields.append( SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) fields.append( SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")</s> ===========changed ref 1=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s>PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========changed ref 2=========== # module: scripts.prepdocs if __name__ == "__main__": parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v", ) parser.add_argument("files", nargs="?", help="Files to be processed") parser.add_argument( "--datalakestorageaccount", required=False, help="Optional. Azure Data Lake Storage Gen2 Account name" ) parser.add_argument( "--datalakefilesystem", required=False, default="gptkbcontainer", help="Optional. Azure Data Lake Storage Gen2 filesystem name", ) parser.add_argument( "--datalakepath", required=False, help="Optional. Azure Data Lake Storage Gen2 filesystem path containing files to index. 
If omitted, index the entire filesystem", ) parser.add_argument( "--datalakekey", required=False, help="Optional. Use this key when authenticating to Azure Data Lake Gen2" ) parser.add_argument( "--useacls", action="store_true", help="Store ACLs from Azure Data Lake Gen2 Filesystem in the search index" ) parser.add_argument( "--category", help="Value for the category field in the search index for all sections indexed in this run" ) parser.add_argument( "--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage" ) parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument( "--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)", ) </s> ===========changed ref 3=========== # module: scripts.prepdocs # offset: 1 <s> this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)", ) parser.add_argument( "--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)" ) parser.add_argument( "--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)", ) parser.add_argument( "--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)", ) parser.add_argument( "--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)", + ) + parser.add_argument( + "--searchanalyzername", + required=False, + default="en.microsoft", + help="Optional. Name of the Azure Cognitive Search analyzer to use for the content field in the index", ) parser.add_argument("--openaihost", help="Host of the API used to compute embeddings ('azure' or 'openai')") parser.add_argument("--openaiservice", help="Name of the Azure OpenAI service used to compute embeddings") parser.add_argument( "--openaideployment", help="Name of the Azure OpenAI model deployment for an embedding model ('text-embedding-ada-002' recommended)", ) parser.add_argument( "--openaimodelname", help="Name of the Azure OpenAI embedding model ('text-embedding-ada-002' recommended)" ) parser.add_argument( "--novectors", action="store_true", help="Don't compute embeddings for the sections (e.g. don't call the</s>
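===========example sketch===========
The payloads in these tests pin down get_search_query's observable contract: use the function_call arguments when the model invoked search_sources, otherwise fall back to the user's own question. A sketch that mirrors that behavior (internal names are illustrative, not the repo's exact implementation):

import json

def get_search_query(chat_completion: dict, default_query: str) -> str:
    message = chat_completion["choices"][0]["message"]
    function_call = message.get("function_call")
    if function_call and function_call.get("name") == "search_sources":
        arguments = json.loads(function_call.get("arguments", "{}"))
        return arguments.get("search_query", default_query)
    return default_query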
tests.test_chatapproach/test_get_search_query_returns_default
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<0>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_search_query_returns_default(): <0> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <1> <2> payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' <3> default_query = "hello" <4> query = chat_approach.get_search_query(json.loads(payload), default_query) <5> <6> assert query == default_query <7>
===========changed ref 0=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 1=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient( endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds ) fields = [ SimpleField(name="id", type="Edm.String", key=True), + SearchableField(name="content", type="Edm.String", analyzer_name=args.searchanalyzername), - SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_configuration="default", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), ] if args.useacls: fields.append( SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) fields.append( SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")</s> ===========changed ref 2=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s>PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========changed ref 3=========== # module: scripts.prepdocs if __name__ == "__main__": parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting 
content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v", ) parser.add_argument("files", nargs="?", help="Files to be processed") parser.add_argument( "--datalakestorageaccount", required=False, help="Optional. Azure Data Lake Storage Gen2 Account name" ) parser.add_argument( "--datalakefilesystem", required=False, default="gptkbcontainer", help="Optional. Azure Data Lake Storage Gen2 filesystem name", ) parser.add_argument( "--datalakepath", required=False, help="Optional. Azure Data Lake Storage Gen2 filesystem path containing files to index. If omitted, index the entire filesystem", ) parser.add_argument( "--datalakekey", required=False, help="Optional. Use this key when authenticating to Azure Data Lake Gen2" ) parser.add_argument( "--useacls", action="store_true", help="Store ACLs from Azure Data Lake Gen2 Filesystem in the search index" ) parser.add_argument( "--category", help="Value for the category field in the search index for all sections indexed in this run" ) parser.add_argument( "--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage" ) parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument( "--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)", ) </s>
tests.test_chatapproach/test_get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<0>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_messages_from_history(): <0> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <1> <2> messages = chat_approach.get_messages_from_history( <3> system_prompt="You are a bot.", <4> model_id="gpt-35-turbo", <5> history=[ <6> { <7> "user": "What happens in a performance review?", <8> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <9> }, <10> {"user": "What does a Product Manager do?"}, <11> ], <12> user_content="What does a Product Manager do?", <13> ) <14> assert messages == [ <15> {"role": "system", "content": "You are a bot."}, <16> {"role": "user", "content": "What happens in a performance review?"}, <17> { <18> "role": "assistant", <19> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <20> },</s>
===========below chunk 0=========== # module: tests.test_chatapproach def test_get_messages_from_history(): # offset: 1 ] ===========unchanged ref 0=========== at: tests.test_chatapproach.test_get_search_query_returns_default default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) ===========changed ref 0=========== # module: tests.test_chatapproach def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 2=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient( endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds ) fields = [ SimpleField(name="id", type="Edm.String", key=True), + SearchableField(name="content", type="Edm.String", analyzer_name=args.searchanalyzername), - SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_configuration="default", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", 
filterable=True, facetable=True), ] if args.useacls: fields.append( SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) fields.append( SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")</s> ===========changed ref 3=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s>PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists")
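===========example sketch===========
get_messages_from_history walks the history newest-first, inserting turns after the system message until the token budget is exceeded; the truncated tests below exercise exactly that cut-off. A condensed sketch of the strategy (count_tokens stands in for the repo's tiktoken-based counter):

def build_messages(system_prompt, history, user_content, count_tokens, max_tokens=4096):
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_content},
    ]
    total = sum(count_tokens(m) for m in messages)
    for turn in reversed(history[:-1]):  # last item is the current question
        if total > max_tokens:
            break
        # Insert assistant then user at index 1, keeping chronological order.
        for role, key in (("assistant", "bot"), ("user", "user")):
            if content := turn.get(key):
                messages.insert(1, {"role": role, "content": content})
                total += count_tokens(messages[1])
    return messages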
tests.test_chatapproach/test_get_messages_from_history_truncated
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<0>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_messages_from_history_truncated(): <0> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <1> <2> messages = chat_approach.get_messages_from_history( <3> system_prompt="You are a bot.", <4> model_id="gpt-35-turbo", <5> history=[ <6> { <7> "user": "What happens in a performance review?", <8> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <9> }, <10> {"user": "What does a Product Manager do?"}, <11> ], <12> user_content="What does a Product Manager do?", <13> max_tokens=10, <14> ) <15> assert messages == [ <16> {"role": "system", "content": "You are a bot."}, <17> {"role": "user", "content": "What does a Product Manager do?"}, <18> ] <19>
===========changed ref 0=========== # module: tests.test_chatapproach def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 2=========== # module: tests.test_chatapproach def test_get_messages_from_history(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include</s> ===========changed ref 3=========== # module: tests.test_chatapproach def test_get_messages_from_history(): # offset: 1 <s> The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 4=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient( endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds ) fields = [ SimpleField(name="id", type="Edm.String", key=True), + SearchableField(name="content", type="Edm.String", analyzer_name=args.searchanalyzername), - SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), SearchField( name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, vector_search_dimensions=1536, vector_search_configuration="default", ), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), ] if args.useacls: fields.append( SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) fields.append( SimpleField(name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) ) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=fields, semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")</s>
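===========example sketch===========
The max_tokens cut-offs asserted above rely on a per-message token count. A hedged sketch of such a counter using tiktoken (the +3 framing overhead is an approximation that varies by model, and tiktoken knows the model as "gpt-3.5-turbo" rather than the Azure deployment name "gpt-35-turbo"):

import tiktoken

def num_tokens_for_message(message: dict, model: str = "gpt-3.5-turbo") -> int:
    encoding = tiktoken.encoding_for_model(model)
    tokens = 3  # assumed per-message framing overhead
    for value in message.values():  # role and content both consume tokens
        tokens += len(encoding.encode(value))
    return tokens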
tests.test_chatapproach/test_get_messages_from_history_truncated_longer
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non-English languages (#780)
<0>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): <0> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <1> <2> messages = chat_approach.get_messages_from_history( <3> system_prompt="You are a bot.", <4> model_id="gpt-35-turbo", <5> history=[ <6> { <7> "user": "What happens in a performance review?", <8> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <9> }, <10> { <11> "user": "Is there a dress code?", <12> "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <13> }, <14> {"user": "What does a Product Manager do?"}, <15> ], <16> user_content="What does a Product Manager do?", <17> max_tokens=30, <18> ) <19> assert messages == [ <20> {"role": "system", "content": "You are a bot."}, <21> {"role": "user", "content": "Is there a dress code?"}, <22> { <23> "role": "assistant", <24> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <25> }, <26> {"role": "user", "content": "</s>
===========below chunk 0=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): # offset: 1 ] ===========unchanged ref 0=========== at: tests.test_chatapproach.test_get_messages_from_history_truncated messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) ===========changed ref 0=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year.
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 2=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices"
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non English languages (#780)
<8>:<add> self.query_language = query_language <add> self.query_speller = query_speller
<s>_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, + query_language: str, + query_speller: str, ): <0> self.search_client = search_client <1> self.openai_host = openai_host <2> self.chatgpt_deployment = chatgpt_deployment <3> self.chatgpt_model = chatgpt_model <4> self.embedding_deployment = embedding_deployment <5> self.embedding_model = embedding_model <6> self.sourcepage_field = sourcepage_field <7> self.content_field = content_field <8> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <9>
===========changed ref 0=========== # module: tests.test_chatapproach def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 2=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year.
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 3=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, { "user": "Is there a dress code?", "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=30, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sh</s> ===========changed ref 4=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): # offset: 1 <s>": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
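Note on the record above: commit #780 threads the Azure Cognitive Search language settings through the constructor instead of hard-coding English. A minimal usage sketch, assuming a French deployment; "fr-fr" and "none" are illustrative values (the tests in this record use "en-us" and "lexicon", and Azure Cognitive Search accepts "lexicon" or "none" for the speller):

    chat_approach = ChatReadRetrieveReadApproach(
        search_client,
        "azure",                    # openai_host
        "chat",                     # chatgpt_deployment (illustrative name)
        "gpt-35-turbo",             # chatgpt_model
        "embedding",                # embedding_deployment (illustrative name)
        "text-embedding-ada-002",   # embedding_model
        "sourcepage",               # sourcepage_field
        "content",                  # content_field
        query_language="fr-fr",     # passed to search calls instead of the old literal "en-us"
        query_speller="none",       # speller support varies by language
    )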
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non English languages (#780)
<8>:<add> self.query_language = query_language <add> self.query_speller = query_speller
<s>_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, + query_language: str, + query_speller: str, ): <0> self.search_client = search_client <1> self.openai_host = openai_host <2> self.chatgpt_deployment = chatgpt_deployment <3> self.chatgpt_model = chatgpt_model <4> self.embedding_model = embedding_model <5> self.embedding_deployment = embedding_deployment <6> self.sourcepage_field = sourcepage_field <7> self.content_field = content_field <8>
===========changed ref 0=========== <s>_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, + query_language: str, + query_speller: str, ): self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.query_language = query_language + self.query_speller = query_speller self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == default_query ===========changed ref 2=========== # module: tests.test_chatapproach def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' default_query = "hello" query = chat_approach.get_search_query(json.loads(payload), default_query) assert query == "accesstelemedicineservices" ===========changed ref 3=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in
a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 4=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) - chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, { "user": "Is there a dress code?", "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=30, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sh</s>
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non English languages (#780)
<26>:<add> query_language=self.query_language, <del> query_language="en-us",
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <6> top = overrides.get("top", 3) <7> filter = self.build_filter(overrides, auth_claims) <8> <9> # If retrieval mode includes vectors, compute an embedding for the query <10> if has_vector: <11> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <12> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <13> query_vector = embedding["data"][0]["embedding"] <14> else: <15> query_vector = None <16> <17> # Only keep the text query if the retrieval mode uses text, otherwise drop it <18> query_text = q if has_text else "" <19> <20> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <21> if overrides.get("semantic_ranker") and has_text: <22> r = await self.search_client.search( <23> query_text, <24> filter=filter, <25> query_type=QueryType.SEMANTIC, <26> query_language="en-us",</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["context"] = extra_info chat_completion.choices[0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' 
Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.embedding_deployment = embedding_deployment at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params)
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
5300bb5ad2eb39e3f81a505907678fc640581f78
support non English languages (#780)
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <5> # Shared by all OpenAI deployments <6> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") <7> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") <9> # Used with Azure OpenAI deployments <10> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <11> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <12> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <13> # Used only with non-Azure OpenAI deployments <14> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") <15> OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION") <16> AZURE_USE_AUTHENTICATION = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" <17> AZURE_SERVER_APP_ID = os.getenv("AZURE_SERVER_APP_ID") <18> AZURE_SERVER_APP_SECRET = os.getenv("AZURE_SERVER_APP_SECRET") <19> AZURE_CLIENT_APP_ID = os.getenv("AZURE_CLIENT_APP_ID")</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 TOKEN_CACHE_PATH = os.getenv("TOKEN_CACHE_PATH") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) # Set up authentication helper auth_helper = AuthenticationHelper( use_authentication=AZURE_USE_AUTHENTICATION, server_app_id=AZURE_SERVER_APP_ID, server_app_secret=AZURE_SERVER_APP_SECRET, client_app_id=AZURE_CLIENT_APP_ID, tenant_id=AZURE_TENANT_ID, token_cache_path=TOKEN_CACHE_PATH, ) # Set up clients for Cognitive Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s>_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api_type = "azure_ad" openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-07-01-preview" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token else: openai.api_type = "openai" openai.api_key = OPENAI_API_KEY openai.organization = OPENAI_ORGANIZATION current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_SEARCH_CLIENT] = search_client current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client current_app.config[CONFIG_AUTH_CLIENT] = auth_helper # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACH] = RetrieveThenReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) current_app.config</s> ===========below chunk 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 3 <s>_CHAT_APPROACH] = ChatReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach"
CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], chatgpt_model: str, embedding_deployment: Optional[str], embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], chatgpt_model: str, embedding_deployment: Optional[str], embedding_model: str, sourcepage_field: str, content_field: str) at: core.authentication AuthenticationHelper(use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], token_cache_path: Optional[str]=None) at: openai api_key = os.environ.get("OPENAI_API_KEY") organization = os.environ.get("OPENAI_ORGANIZATION") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") ===========unchanged ref 1=========== api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== <s>_client: SearchClient, openai_host: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI chatgpt_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, sourcepage_field: str, content_field: str, + query_language: str, + query_speller: str, ): self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field + self.query_language = query_language + self.query_speller = query_speller
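The ground-truth body for this record is not shown, but for the language feature to work end to end, setup_clients presumably reads the new settings from the environment and appends them as the two trailing constructor arguments. A sketch under that assumption; the environment variable names below are hypothetical, not taken from this diff:

    AZURE_SEARCH_QUERY_LANGUAGE = os.getenv("AZURE_SEARCH_QUERY_LANGUAGE", "en-us")  # hypothetical name
    AZURE_SEARCH_QUERY_SPELLER = os.getenv("AZURE_SEARCH_QUERY_SPELLER", "lexicon")  # hypothetical name

    current_app.config[CONFIG_ASK_APPROACH] = RetrieveThenReadApproach(
        search_client,
        OPENAI_HOST,
        AZURE_OPENAI_CHATGPT_DEPLOYMENT,
        OPENAI_CHATGPT_MODEL,
        AZURE_OPENAI_EMB_DEPLOYMENT,
        OPENAI_EMB_MODEL,
        KB_FIELDS_SOURCEPAGE,
        KB_FIELDS_CONTENT,
        AZURE_SEARCH_QUERY_LANGUAGE,
        AZURE_SEARCH_QUERY_SPELLER,
    )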
app.backend.core.messagebuilder/MessageBuilder.__init__
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<2>:<del> self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
# module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): <0> self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] <1> self.model = chatgpt_model <2> self.token_length = num_tokens_from_messages(self.messages[-1], self.model) <3>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder normalize_content(content: str)
app.backend.core.messagebuilder/MessageBuilder.append_message
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<1>:<del> self.token_length += num_tokens_from_messages(self.messages[index], self.model)
# module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): <0> self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) <1> self.token_length += num_tokens_from_messages(self.messages[index], self.model) <2>
===========unchanged ref 0=========== at: app.backend.core.messagebuilder.MessageBuilder normalize_content(content: str) at: app.backend.core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
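Taken together, the two diffs from commit #778 drop the eagerly maintained token_length running total; callers instead ask for per-message counts via count_tokens_for_message, shown verbatim in changed ref 0 above, which lets truncation loops budget message by message. A consolidated sketch of the class after the change; normalize_content's body is not shown in these diffs, and the NFC normalization below is an assumption consistent with the unicode tests ("a\u0301" stored as "á"):

    import unicodedata

    class MessageBuilder:
        def __init__(self, system_content: str, chatgpt_model: str):
            self.messages = [{"role": "system", "content": self.normalize_content(system_content)}]
            self.model = chatgpt_model

        def append_message(self, role: str, content: str, index: int = 1):
            # Insert after the system message by default; no running token total anymore.
            self.messages.insert(index, {"role": role, "content": self.normalize_content(content)})

        def count_tokens_for_message(self, message: dict[str, str]) -> int:
            # num_tokens_from_messages is the repo helper referenced in the diffs above
            # (import path omitted here).
            return num_tokens_from_messages(message, self.model)

        def normalize_content(self, content: str) -> str:
            return unicodedata.normalize("NFC", content)  # assumed; body not in these diffs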
tests.test_messagebuilder/test_messagebuilder
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<6>:<add> assert builder.count_tokens_for_message(builder.messages[0]) == 8 <del> assert builder.token_length == 8
# module: tests.test_messagebuilder def test_messagebuilder(): <0> builder = MessageBuilder("You are a bot.", "gpt-35-turbo") <1> assert builder.messages == [ <2> # 1 token, 1 token, 1 token, 5 tokens <3> {"role": "system", "content": "You are a bot."} <4> ] <5> assert builder.model == "gpt-35-turbo" <6> assert builder.token_length == 8 <7>
===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
tests.test_messagebuilder/test_messagebuilder_append
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<9>:<add> assert builder.count_tokens_for_message(builder.messages[0]) == 8 <add> assert builder.count_tokens_for_message(builder.messages[1]) == 9 <del> assert builder.token_length == 17
# module: tests.test_messagebuilder def test_messagebuilder_append(): <0> builder = MessageBuilder("You are a bot.", "gpt-35-turbo") <1> builder.append_message("user", "Hello, how are you?") <2> assert builder.messages == [ <3> # 1 token, 1 token, 1 token, 5 tokens <4> {"role": "system", "content": "You are a bot."}, <5> # 1 token, 1 token, 1 token, 6 tokens <6> {"role": "user", "content": "Hello, how are you?"}, <7> ] <8> assert builder.model == "gpt-35-turbo" <9> assert builder.token_length == 17 <10>
===========changed ref 0=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8 ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 3=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
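The per-message counts asserted here (8 for the five-token system prompt, 9 for the six-token user message) fit a simple model: three tokens of fixed per-message overhead (the "1 token, 1 token, 1 token" comments) plus the encoded content. A sketch that reproduces those numbers with tiktoken; this is an illustration, not the repo's actual num_tokens_from_messages helper:

    import tiktoken

    def approx_tokens(message: dict[str, str]) -> int:
        encoding = tiktoken.get_encoding("cl100k_base")  # the encoding used by gpt-35-turbo
        return 3 + len(encoding.encode(message["content"]))  # 3 = assumed per-message overhead

    assert approx_tokens({"role": "system", "content": "You are a bot."}) == 8
    assert approx_tokens({"role": "user", "content": "Hello, how are you?"}) == 9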
tests.test_messagebuilder/test_messagebuilder_unicode
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<6>:<add> assert builder.count_tokens_for_message(builder.messages[0]) == 4 <del> assert builder.token_length == 4
# module: tests.test_messagebuilder def test_messagebuilder_unicode(): <0> builder = MessageBuilder("a\u0301", "gpt-35-turbo") <1> assert builder.messages == [ <2> # 1 token, 1 token, 1 token, 1 token <3> {"role": "system", "content": "á"} <4> ] <5> assert builder.model == "gpt-35-turbo" <6> assert builder.token_length == 4 <7>
===========changed ref 0=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8 ===========changed ref 1=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 + assert builder.count_tokens_for_message(builder.messages[1]) == 9 - assert builder.token_length == 17 ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 3=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 4=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
tests.test_messagebuilder/test_messagebuilder_unicode_append
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<9>:<add> assert builder.count_tokens_for_message(builder.messages[0]) == 4 <add> assert builder.count_tokens_for_message(builder.messages[1]) == 4 <del> assert builder.token_length == 8
# module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): <0> builder = MessageBuilder("a\u0301", "gpt-35-turbo") <1> builder.append_message("user", "a\u0301") <2> assert builder.messages == [ <3> # 1 token, 1 token, 1 token, 1 token <4> {"role": "system", "content": "á"}, <5> # 1 token, 1 token, 1 token, 1 token <6> {"role": "user", "content": "á"}, <7> ] <8> assert builder.model == "gpt-35-turbo" <9> assert builder.token_length == 8 <10>
===========changed ref 0=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4 ===========changed ref 1=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8 ===========changed ref 2=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 + assert builder.count_tokens_for_message(builder.messages[1]) == 9 - assert builder.token_length == 17 ===========changed ref 3=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 4=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 5=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
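The two unicode tests above show why normalization matters for token counts: "a\u0301" is two code points (an "a" plus the U+0301 combining acute accent) but is stored as the single precomposed code point "á", which then costs only one content token. That behavior matches Unicode NFC normalization; a quick standard-library check (normalize_content's actual body is not shown in these records):

    import unicodedata

    decomposed = "a\u0301"                               # 2 code points: 'a' + combining accent
    composed = unicodedata.normalize("NFC", decomposed)  # -> '\u00e1'
    assert composed == "\u00e1" and len(composed) == 1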
tests.test_chatapproach/test_get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<8>:<add> {"role": "user", "content": "What happens in a performance review?"}, <9>:<add> "role": "assistant", <del> "user": "What happens in a performance review?", <10>:<add> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <del> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <12>:<add> {"role": "user", "content": "What does a Product Manager do?"}, <del> {"user": "What does a Product Manager do?"}, <15>:<add> max_tokens=3000,
# module: tests.test_chatapproach def test_get_messages_from_history(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", <6> model_id="gpt-35-turbo", <7> history=[ <8> { <9> "user": "What happens in a performance review?", <10> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <11> }, <12> {"user": "What does a Product Manager do?"}, <13> ], <14> user_content="What does a Product Manager do?", <15> ) <16> assert messages == [ <17> {"role": "system", "content": "You are a bot."}, <18> {"role": "user", "content": "What happens in a performance review?"}, <19> { <20> "role": "assistant", <21> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employ</s>
===========below chunk 0=========== # module: tests.test_chatapproach def test_get_messages_from_history(): # offset: 1 }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model) ===========changed ref 3=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4 ===========changed ref 4=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8 ===========changed ref 5=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") builder.append_message("user", "a\u0301") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"}, # 1 token, 1 token, 1 token, 1 token {"role": "user", "content": "á"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 + assert builder.count_tokens_for_message(builder.messages[1]) == 4 - assert builder.token_length == 8 ===========changed ref 6=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 + assert builder.count_tokens_for_message(builder.messages[1]) == 9 - assert builder.token_length == 17
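Beyond the token-count changes, commit #778 also switches the test history from {"user": ..., "bot": ...} turn dicts to OpenAI-style {"role": ..., "content": ...} messages, as the +/- lines above show. For anyone still holding histories in the old shape, the conversion is mechanical; an illustrative helper, not part of the repo:

    def to_role_content(history: list[dict[str, str]]) -> list[dict[str, str]]:
        # Convert legacy {"user": ..., "bot": ...} turns into role/content messages.
        messages = []
        for turn in history:
            if "user" in turn:
                messages.append({"role": "user", "content": turn["user"]})
            if "bot" in turn:
                messages.append({"role": "assistant", "content": turn["bot"]})
        return messages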
tests.test_chatapproach/test_get_messages_from_history_truncated
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<8>:<add> {"role": "user", "content": "What happens in a performance review?"}, <9>:<add> "role": "assistant", <del> "user": "What happens in a performance review?", <10>:<add> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <del> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <12>:<add> {"role": "user", "content": "What does a Product Manager do?"}, <del> {"user": "What does a Product Manager do?"},
# module: tests.test_chatapproach def test_get_messages_from_history_truncated(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", <6> model_id="gpt-35-turbo", <7> history=[ <8> { <9> "user": "What happens in a performance review?", <10> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <11> }, <12> {"user": "What does a Product Manager do?"}, <13> ], <14> user_content="What does a Product Manager do?", <15> max_tokens=10, <16> ) <17> assert messages == [ <18> {"role": "system", "content": "You are a bot."}, <19> {"role": "user", "content": "What does a Product Manager do?"}, <20> ] <21>
===========changed ref 0=========== # module: tests.test_chatapproach def test_get_messages_from_history(): chat_approach = ChatReadRetrieveReadApproach( None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" ) messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ + {"role": "user", "content": "What happens in a performance review?"}, { + "role": "assistant", - "user": "What happens in a performance review?", + "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", - "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, + {"role": "user", "content": "What does a Product Manager do?"}, - {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", + max_tokens=3000, ) </s> ===========changed ref 1=========== # module: tests.test_chatapproach def test_get_messages_from_history(): # offset: 1 <s> ], user_content="What does a Product Manager do?", + max_tokens=3000, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year.
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ] ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) + ===========changed ref 3=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model) ===========changed ref 4=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model) ===========changed ref 5=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4 ===========changed ref 6=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8 ===========changed ref 7=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") builder.append_message("user", "a\u0301") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"}, # 1 token, 1 token, 1 token, 1 token {"role": "user", "content": "á"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 + assert builder.count_tokens_for_message(builder.messages[1]) == 4 - assert builder.token_length == 8 ===========changed ref 8=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 + assert builder.count_tokens_for_message(builder.messages[1]) == 9 - assert builder.token_length == 17
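The truncation these tests pin down reduces to a running token budget: the newest user message is always kept, then older history is admitted newest-first until the next message would overflow. A minimal sketch of that pattern, assuming a caller-supplied count_tokens(message) helper (hypothetical; the repo's own counter is num_tokens_from_messages):

def truncate_history(history, user_message, max_tokens, count_tokens):
    # Keep the newest user message unconditionally.
    kept = [user_message]
    total = count_tokens(user_message)
    # Walk older messages newest-first; stop once the budget would overflow.
    for message in reversed(history):
        cost = count_tokens(message)
        if total + cost > max_tokens:
            break  # this message and everything older is dropped
        kept.insert(0, message)
        total += cost
    return kept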
tests.test_chatapproach/test_get_messages_from_history_truncated_longer
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<5>:<add> system_prompt="You are a bot.", # 8 tokens <del> system_prompt="You are a bot.", <8>:<add> {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens <9>:<add> "role": "assistant", <del> "user": "What happens in a performance review?", <10>:<add> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <del> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <11>:<add> }, # 102 tokens <add> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <del> }, <13>:<add> "role": "assistant", <del> "user": "Is there a dress code?", <14>:<add> "content": "Yes,
# module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): <0> chat_approach = ChatReadRetrieveReadApproach( <1> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <2> ) <3> <4> messages = chat_approach.get_messages_from_history( <5> system_prompt="You are a bot.", <6> model_id="gpt-35-turbo", <7> history=[ <8> { <9> "user": "What happens in a performance review?", <10> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <11> }, <12> { <13> "user": "Is there a dress code?", <14> "bot": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <15> }, <16> {"user": "What does a Product Manager do?"}, <17> ], <18> user_content="What does a Product Manager do?", <19> max_tokens=30, <20> ) <21> assert messages == [ <22> {"role": "system", "content": "You are a bot."}, <23> {"role": "user", "content": "Is there a dress code?"}, <24> { <25> "role": "assistant", <26> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <27> </s>
===========below chunk 0=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_longer(): # offset: 1 {"role": "user", "content": "What does a Product Manager do?"}, ]
===========changed ref 0=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated(): chat_approach = ChatReadRetrieveReadApproach( None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" ) messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ + {"role": "user", "content": "What happens in a performance review?"}, { + "role": "assistant", - "user": "What happens in a performance review?", + "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", - "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, + {"role": "user", "content": "What does a Product Manager do?"}, - {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", max_tokens=10, )</s>
===========changed ref 1=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated(): # offset: 1 <s>}, ], user_content="What does a Product Manager do?", max_tokens=10, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What does a Product Manager do?"}, ]
===========changed ref 2=========== # module: tests.test_chatapproach def test_get_messages_from_history(): chat_approach = ChatReadRetrieveReadApproach( None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" ) messages = chat_approach.get_messages_from_history( system_prompt="You are a bot.", model_id="gpt-35-turbo", history=[ + {"role": "user", "content": "What happens in a performance review?"}, { + "role": "assistant", - "user": "What happens in a performance review?", + "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", - "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, + {"role": "user", "content": "What does a Product Manager do?"}, - {"user": "What does a Product Manager do?"}, ], user_content="What does a Product Manager do?", + max_tokens=3000, ) </s>
===========changed ref 3=========== # module: tests.test_chatapproach def test_get_messages_from_history(): # offset: 1 <s> ], user_content="What does a Product Manager do?", + max_tokens=3000, ) assert messages == [ {"role": "system", "content": "You are a bot."}, {"role": "user", "content": "What happens in a performance review?"}, { "role": "assistant", "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
===========changed ref 4=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) +
===========changed ref 5=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model)
===========changed ref 6=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
===========changed ref 7=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4
===========changed ref 8=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8
tests.test_app/test_chat_session_state_persists
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<7>:<del> "stream": False,
# module: tests.test_app @pytest.mark.asyncio async def test_chat_session_state_persists(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "messages": [{"content": "What is the capital of France?", "role": "user"}], <4> "context": { <5> "overrides": {"retrieval_mode": "text"}, <6> }, <7> "stream": False, <8> "session_state": {"conversation_id": 1234}, <9> }, <10> ) <11> assert response.status_code == 200 <12> result = await response.get_json() <13> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <14>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging DEBUG = 10 at: tests.test_app.test_chat_with_history response = await client.post( "/chat", json={ "messages": [ {"content": "What happens in a performance review?", "role": "user"}, { "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", "role": "assistant", }, {"content": "Is dental covered?", "role": "user"}, ], "context": { "overrides": {"retrieval_mode": "text"}, }, }, )
===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_session_state_persists(client, snapshot): - response = await client.post( - "/ask", - json={ - "messages": [{"content": "What is the capital of France?", "role": "user"}], - "context": { - "overrides": {"retrieval_mode": "text"}, - }, - "session_state": {"conversation_id": 1234}, - }, - ) - assert response.status_code == 200 - result = await response.get_json() - snapshot.assert_match(json.dumps(result, indent=4), "result.json") -
===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_history(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [ + {"content": "What happens in a performance review?", "role": "user"}, + { + "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + "role": "assistant", + }, + {"content": "Is dental covered?", "role": "user"}, + ], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + assert result["choices"][0]["context"]["thoughts"].find("performance review") != -1 + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) +
===========changed ref 3=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model)
===========changed ref 4=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
===========changed ref 5=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4
===========changed ref 6=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8
===========changed ref 7=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") builder.append_message("user", "a\u0301") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"}, # 1 token, 1 token, 1 token, 1 token {"role": "user", "content": "á"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 + assert builder.count_tokens_for_message(builder.messages[1]) == 4 - assert builder.token_length == 8
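Stripped of the snapshot machinery, the guarantee this record tests is that session_state round-trips untouched through the chat endpoint. A minimal sketch against the same client fixture the suite uses (hypothetical test name):

import pytest

@pytest.mark.asyncio
async def test_session_state_roundtrip(client):
    # Whatever opaque JSON the client sends as session_state should come
    # back unchanged on the response choice.
    response = await client.post(
        "/chat",
        json={
            "messages": [{"content": "hi", "role": "user"}],
            "context": {"overrides": {"retrieval_mode": "text"}},
            "session_state": {"conversation_id": 1234},
        },
    )
    assert response.status_code == 200
    result = await response.get_json()
    assert result["choices"][0]["session_state"] == {"conversation_id": 1234}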
locustfile/ChatUser.ask_question
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<7>:<add> "content": random.choice( <del> "user": random.choice( <14>:<add> ), <del> ) <15>:<add> "role": "user", <add> }, <del> } <31>:<add> {"content": "What happens in a performance review?", "role": "user"}, <32>:<add> "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include
# module: locustfile class ChatUser(HttpUser): @task def ask_question(self): <0> self.client.get("/") <1> time.sleep(5) <2> self.client.post( <3> "/chat", <4> json={ <5> "history": [ <6> { <7> "user": random.choice( <8> [ <9> "What is included in my Northwind Health Plus plan that is not in standard?", <10> "What does a Product Manager do?", <11> "What happens in a performance review?", <12> "Whats your whistleblower policy?", <13> ] <14> ) <15> } <16> ], <17> "overrides": { <18> "retrieval_mode": "hybrid", <19> "semantic_ranker": True, <20> "semantic_captions": False, <21> "top": 3, <22> "suggest_followup_questions": False, <23> }, <24> }, <25> ) <26> time.sleep(5) <27> self.client.post( <28> "/chat", <29> json={ <30> "history": [ <31> { <32> "user": "What happens in a performance review?", <33> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <34> }, <35> {"user": "Does my plan cover eye exams?"}, <36> ], <37> "overrides": { <38> "retrieval_mode": "hybrid", <39> "semantic_ranker": True, <40> "semantic_captions": False, <41> "top</s>
===========below chunk 0=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 "suggest_followup_questions": False, }, }, )
===========unchanged ref 0=========== at: locustfile.ChatUser wait_time = between(5, 20) at: random choice = _inst.choice at: time sleep(secs: float) -> None
===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) +
===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model)
===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
===========changed ref 3=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4
===========changed ref 4=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8
===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_session_state_persists(client, snapshot): response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text"}, }, - "stream": False, "session_state": {"conversation_id": 1234}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
===========changed ref 6=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_session_state_persists(client, snapshot): - response = await client.post( - "/ask", - json={ - "messages": [{"content": "What is the capital of France?", "role": "user"}], - "context": { - "overrides": {"retrieval_mode": "text"}, - }, - "session_state": {"conversation_id": 1234}, - }, - ) - assert response.status_code == 200 - result = await response.get_json() - snapshot.assert_match(json.dumps(result, indent=4), "result.json") -
===========changed ref 7=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") builder.append_message("user", "a\u0301") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"}, # 1 token, 1 token, 1 token, 1 token {"role": "user", "content": "á"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 + assert builder.count_tokens_for_message(builder.messages[1]) == 4 - assert builder.token_length == 8
===========changed ref 8=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 + assert builder.count_tokens_for_message(builder.messages[1]) == 9 - assert builder.token_length == 17
===========changed ref 9=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_long_history(client, snapshot, caplog): + """This test makes sure that the history is truncated to max tokens minus 1024.""" + caplog.set_level(logging.DEBUG) + response = await client.post( + "/chat", + json={ + "messages": [ + {"role": "user", "content": "Is there a dress code?"}, # 9 tokens + { + "role": "assistant", + "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]" + * 150, + }, # 3900 tokens + {"role": "user", "content": "What does a product manager do?"}, # 10 tokens + ], + "context": { + "overrides": {"retrieval_mode": "text"}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + # Assert that it doesn't find the first message, since it wouldn't fit in the max tokens. + assert result["choices"][0]["context"]["thoughts"].find("Is there a dress code?") == -1 + assert "Reached max tokens" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
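The load profile above can be reduced to a skeleton. A minimal sketch with a hypothetical class name, using the same Locust primitives and the OpenAI-style request shape the tests in this section use:

from locust import HttpUser, between, task

class MinimalChatUser(HttpUser):
    wait_time = between(5, 20)  # seconds between tasks per simulated user

    @task
    def ask_question(self):
        # One chat turn per task; pacing comes from wait_time.
        self.client.post(
            "/chat",
            json={
                "messages": [{"role": "user", "content": "What does a Product Manager do?"}],
                "context": {"overrides": {"retrieval_mode": "hybrid", "top": 3}},
            },
        )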
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
07c2afa40c6d46210562a692c997ce68286b1bc8
Message builder fixes for history format and token count (#778)
<9>:<add> total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1]) <10>:<add> newest_to_oldest = list(reversed(history[:-1])) <add> for message in newest_to_oldest: <add> potential_message_count = message_builder.count_tokens_for_message(message) <add> if (total_token_count + potential_message_count) > max_tokens: <add> logging.debug("Reached max tokens of %d, history will be truncated", max_tokens) <del> for h in reversed(history[:-1]): <11>:<del> if message_builder.token_length > max_tokens: <13>:<del> if bot_msg := h.get("bot"): <14>:<del> message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index) <15>:<del> if user_msg := h.get("user"): <16>:<add> message_builder.append_message(message["role"], message["content"], index=append_index) <del> message_builder.append_message(self.USER, user_msg, index=append_index) <17>:<add> total_token_count += potential_message_count <del>
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def get_messages_from_history( self, system_prompt: str, model_id: str, history: list[dict[str, str]], user_content: str, + max_tokens: int, few_shots=[], - max_tokens: int = 4096, ) -> list: <0> message_builder = MessageBuilder(system_prompt, model_id) <1> <2> # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. <3> for shot in few_shots: <4> message_builder.append_message(shot.get("role"), shot.get("content")) <5> <6> append_index = len(few_shots) + 1 <7> <8> message_builder.append_message(self.USER, user_content, index=append_index) <9> <10> for h in reversed(history[:-1]): <11> if message_builder.token_length > max_tokens: <12> break <13> if bot_msg := h.get("bot"): <14> message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index) <15> if user_msg := h.get("user"): <16> message_builder.append_message(self.USER, user_msg, index=append_index) <17> <18> return message_builder.messages <19>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_with_streaming(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any=None) -> AsyncGenerator[dict, None] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run overrides = context.get("overrides", {}) auth_claims = context.get("auth_claims", {}) response = await self.run_without_streaming(messages, overrides, auth_claims, session_state) at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) count_tokens_for_message(message: dict[str, str])
===========unchanged ref 2=========== at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def count_tokens_for_message(self, message: dict[str, str]): + return num_tokens_from_messages(message, self.model) +
===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: def append_message(self, role: str, content: str, index: int = 1): self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - self.token_length += num_tokens_from_messages(self.messages[index], self.model)
===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: def __init__(self, system_content: str, chatgpt_model: str): self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.model = chatgpt_model - self.token_length = num_tokens_from_messages(self.messages[-1], self.model)
===========changed ref 3=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 4 - assert builder.token_length == 4
===========changed ref 4=========== # module: tests.test_messagebuilder def test_messagebuilder(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."} ] assert builder.model == "gpt-35-turbo" + assert builder.count_tokens_for_message(builder.messages[0]) == 8 - assert builder.token_length == 8
===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_session_state_persists(client, snapshot): response = await client.post( "/chat", json={ "messages": [{"content": "What is the capital of France?", "role": "user"}], "context": { "overrides": {"retrieval_mode": "text"}, }, - "stream": False, "session_state": {"conversation_id": 1234}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
===========changed ref 6=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_session_state_persists(client, snapshot): - response = await client.post( - "/ask", - json={ - "messages": [{"content": "What is the capital of France?", "role": "user"}], - "context": { - "overrides": {"retrieval_mode": "text"}, - }, - "session_state": {"conversation_id": 1234}, - }, - ) - assert response.status_code == 200 - result = await response.get_json() - snapshot.assert_match(json.dumps(result, indent=4), "result.json") -
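The per-message counter that count_tokens_for_message wraps is, in spirit, the familiar OpenAI cookbook recipe: a small fixed framing overhead per message plus the encoded length of each field. A rough standalone sketch with tiktoken (the constants are an approximation, not the repo's exact num_tokens_from_messages):

import tiktoken

def count_tokens_for_message(message: dict[str, str], model: str = "gpt-3.5-turbo") -> int:
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 4  # approximate per-message framing overhead
    for value in message.values():
        num_tokens += len(encoding.encode(value))
    return num_tokens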
tests.test_chatapproach/test_get_messages_from_history_truncated_break_pair
Modified
Azure-Samples~azure-search-openai-demo
1844b005ab62c301d7b62fbc3b6aed7267e46eae
Add scheduler package (#822)
<1>:<add> chat_approach = ChatReadRetrieveReadApproach( <add> None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" <add> ) <del> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "")
# module: tests.test_chatapproach def test_get_messages_from_history_truncated_break_pair(): <0> """Tests that the truncation breaks the pair of messages.""" <1> chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") <2> <3> messages = chat_approach.get_messages_from_history( <4> system_prompt="You are a bot.", # 8 tokens <5> model_id="gpt-35-turbo", <6> history=[ <7> {"role": "user", "content": "What happens in a performance review?"}, # 10 tokens <8> { <9> "role": "assistant", <10> "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <11> }, # 102 tokens <12> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <13> { <14> "role": "assistant", <15> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", <16> }, # 26 tokens <17> {"role": "user", "content": "What does a Product Manager do?"}, # 10 tokens <18> ], <19> user_content="What does a Product Manager do?", <20> max_tokens=147, <21> ) <22> assert messages == [ <23> {"role": "system", "content": "You are a bot."}, <24> { <25> "role": "assistant", <26> </s>
===========below chunk 0=========== # module: tests.test_chatapproach def test_get_messages_from_history_truncated_break_pair(): # offset: 1 }, {"role": "user", "content": "Is there a dress code?"}, { "role": "assistant", "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", }, {"role": "user", "content": "What does a Product Manager do?"}, ]
scripts.prepdocs/read_files
Modified
Azure-Samples~azure-search-openai-demo
ca01af94d740e77faf37ba08b1b994f29e5cc32f
Store an MD5 hash of uploaded/indexed file and check before prepdocs (#835)
<15>:<add> # if filename ends in .md5 skip <add> if filename.endswith(".md5"): <add> continue <add> <add> # if there is a file called .md5 in this directory, see if its updated <add> stored_hash = None <add> with open(filename, "rb") as file: <add> existing_hash = hashlib.md5(file.read()).hexdigest() <add> if os.path.exists(filename + ".md5"): <add> with open(filename + ".md5", encoding="utf-8") as md5_f: <add> stored_hash = md5_f.read() <add> <add> if stored_hash and stored_hash.strip() == existing_hash.strip(): <add> print(f"Skipping {filename}, no changes detected.") <add> continue <add> else: <add> # Write the hash <add> with open(filename + ".md5", "w", encoding="utf-8") as md5_f: <add> md5_f.write
# module: scripts.prepdocs def read_files( path_pattern: str, use_vectors: bool, vectors_batch_support: bool, embedding_deployment: Optional[str] = None, embedding_model: Optional[str] = None, ): <0> """ <1> Recursively read directory structure under `path_pattern` <2> and execute indexing for the individual files <3> """ <4> for filename in glob.glob(path_pattern): <5> if args.verbose: <6> print(f"Processing '{filename}'") <7> if args.remove: <8> remove_blobs(filename) <9> remove_from_index(filename) <10> else: <11> if os.path.isdir(filename): <12> read_files(filename + "/*", use_vectors, vectors_batch_support) <13> continue <14> try: <15> if not args.skipblobs: <16> upload_blobs(filename) <17> page_map = get_document_text(filename) <18> sections = create_sections( <19> os.path.basename(filename), <20> page_map, <21> use_vectors and not vectors_batch_support, <22> embedding_deployment, <23> embedding_model, <24> ) <25> if use_vectors and vectors_batch_support: <26> sections = update_embeddings_in_batch(sections) <27> index_sections(os.path.basename(filename), sections) <28> except Exception as e: <29> print(f"\tGot an error while reading {filename} -> {e} --> skipping file") <30>
===========unchanged ref 0=========== at: glob glob(pathname: AnyStr, *, recursive: bool=...) -> List[AnyStr] at: hashlib md5(string: ReadableBuffer=...) -> _Hash at: hashlib._Hash digest_size: int block_size: int name: str hexdigest() -> str at: io.BufferedReader read(self, size: Optional[int]=..., /) -> bytes at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: os.path exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool isdir(s: AnyPath) -> bool at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() remove_blobs(filename) remove_from_index(filename) at: typing.IO __slots__ = () read(n: int=...) -> AnyStr
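The change this record captures is a sidecar-hash guard: skip a file whose MD5 matches the stored <filename>.md5, otherwise refresh the sidecar and reprocess. Restated as a standalone sketch (hypothetical helper name):

import hashlib
import os

def file_unchanged(filename: str) -> bool:
    # Hash the current file contents.
    with open(filename, "rb") as f:
        current_hash = hashlib.md5(f.read()).hexdigest()
    sidecar = filename + ".md5"
    # Skip when the stored hash matches.
    if os.path.exists(sidecar):
        with open(sidecar, encoding="utf-8") as f:
            if f.read().strip() == current_hash:
                return True
    # Otherwise record the new hash and signal that work is needed.
    with open(sidecar, "w", encoding="utf-8") as f:
        f.write(current_hash)
    return False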
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
16a61bfae3e0e5ff5c6028fd10114522f3246dac
Improve follow-up questions and pipe into context (#832)
<1>:<add> def __init__(self, answer: str): <del> def __init__(self, answer): <5>:<del> {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, <7>:<add> # Split at << to simulate chunked responses <add> if answer.find("<<") > -1: <add> parts = answer.split("<<") <add> self.responses.append( <add> { <add> "object": "chat.completion.chunk", <add> "choices": [{"delta": {"role": "assistant", "content": parts[0] + "<<"}}], <add> } <add> ) <add> self.responses.append( <add> { <add> "object": "chat.completion.chunk", <add> "choices": [{"delta": {"role": "assistant", "content": parts[1]}}], <add> } <add> ) <add> else: <add> self.responses.append( <add> { <add> "object": "chat.completion.chunk", <add> "choices": [{"delta": {"content": answer}}], <add> } <add> )
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer): <2> self.responses = [ <3> {"object": "chat.completion.chunk", "choices": []}, <4> {"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]}, <5> {"object": "chat.completion.chunk", "choices": [{"delta": {"content": answer}}]}, <6> ] <7> <8> def __aiter__(self): <9> return self <10> <11> async def __anext__(self): <12> if self.responses: <13> return self.responses.pop(0) <14> else: <15> raise StopAsyncIteration <16> <17> async def mock_acreate(*args, **kwargs): <18> if openai.api_type == "openai": <19> assert kwargs.get("deployment_id") is None <20> else: <21> assert kwargs.get("deployment_id") is not None <22> messages = kwargs["messages"] <23> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <24> answer = "capital of France" <25> else: <26> answer = "The capital of France is Paris. [Benefit_Options-2.pdf]." <27> if "stream" in kwargs and kwargs["stream"] is True: <28> return AsyncChatCompletionIterator(answer) <29> else: <30> return openai.util.convert_to_openai_object( <31> {"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]} <32> ) <33> <34> monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) <35>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None]
===========changed ref 0=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "Here is answer to your question." + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question." + assert followup_questions == [] +
===========changed ref 1=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "<<What is the dress code?>>" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "" + assert followup_questions == ["What is the dress code?"] +
===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_followup(client, snapshot): + response = await client.post( + "/chat", + json={ + "stream": True, + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"suggest_followup_questions": True}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") +
===========changed ref 3=========== # module: tests.test_chatapproach + def test_extract_followup_questions(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "Here is answer to your question.<<What is the dress code?>>" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question." + assert followup_questions == ["What is the dress code?"] +
===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_followup(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"suggest_followup_questions": True}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + assert result["choices"][0]["context"]["followup_questions"][0] == "What is the capital of Spain?" + + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
===========changed ref 5=========== # module: tests.test_chatapproach + def test_extract_followup_questions_three(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = """Here is answer to your question. + + <<What are some examples of successful product launches they should have experience with?>> + <<Are there any specific technical skills or certifications required for the role?>> + <<Is there a preference for candidates with experience in a specific industry or sector?>>""" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question.\n\n" + assert followup_questions == [ + "What are some examples of successful product launches they should have experience with?", + "Are there any specific technical skills or certifications required for the role?", + "Is there a preference for candidates with experience in a specific industry or sector?", + ] +
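The fixture's streaming side comes down to a hand-rolled async iterator that pops canned chunks until exhausted. The same shape in isolation (hypothetical names):

class FakeChatStream:
    def __init__(self, chunks):
        self.chunks = list(chunks)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Pop the next canned chunk; signal end-of-stream when empty.
        if self.chunks:
            return self.chunks.pop(0)
        raise StopAsyncIteration

Anything written as "async for chunk in FakeChatStream([...])" then consumes the canned chunks exactly as it would a real streamed response object.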
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
16a61bfae3e0e5ff5c6028fd10114522f3246dac
Improve follow-up questions and pipe into context (#832)
<5>:<add> if overrides.get("suggest_followup_questions"): <add> content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) <add> chat_resp["choices"][0]["message"]["content"] = content <add> chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=False <2> ) <3> chat_resp = dict(await chat_coroutine) <4> chat_resp["choices"][0]["context"] = extra_info <5> chat_resp["choices"][0]["session_state"] = session_state <6> return chat_resp <7>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. Enclose the follow-up questions in double angle brackets. Example: <<Are there exclusions for prescriptions?>> <<Which pharmacies can be ordered from?>> <<What is the limit for over-the-counter medication?>> Do no repeat questions that have already been asked. Make sure the last question ends with ">>"."""
===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_until_final_call extra_info = { "data_points": results, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace("\n", "<br>"), }
===========unchanged ref 2=========== chat_coroutine = openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=response_token_limit, n=1, stream=should_stream, )
===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. + Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. - Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ + follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. - follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. + Enclose the follow-up questions in double angle brackets.</s>
+ Do no repeat questions that have already been asked. - Try not to repeat questions that have already been asked. + Make sure the last question ends with ">>".""" - Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ]
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
16a61bfae3e0e5ff5c6028fd10114522f3246dac
Improve follow-up questions and pipe into context (#832)
<16>:<add> followup_questions_started = False <add> followup_content = "" <19>:<add> # if event contains << and not >>, it is start of follow-up question, truncate <add> content = event["choices"][0]["delta"].get("content", "") <add> if overrides.get("suggest_followup_questions") and "<<" in content: <add> followup_questions_started = True <add> earlier_content = content[: content.index("<<")] <add> if earlier_content: <add> event["choices"][0]["delta"]["content"] = earlier_content <add> yield event <add> followup_content += content[content.index("<<") :] <add> elif followup_questions_started: <add> followup_content += content <add> else: <add> yield event <del> yield event <20>:<add> if followup_content: <add> _, followup_questions = self.extract_followup_questions(followup_content) <add> yield {
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_with_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> AsyncGenerator[dict, None]: <0> extra_info, chat_coroutine = await self.run_until_final_call( <1> history, overrides, auth_claims, should_stream=True <2> ) <3> yield { <4> "choices": [ <5> { <6> "delta": {"role": self.ASSISTANT}, <7> "context": extra_info, <8> "session_state": session_state, <9> "finish_reason": None, <10> "index": 0, <11> } <12> ], <13> "object": "chat.completion.chunk", <14> } <15> <16> async for event in await chat_coroutine: <17> # "2023-07-01-preview" API version has a bug where first response has empty choices <18> if event["choices"]: <19> yield event <20>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach ASSISTANT = "assistant" run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], should_stream: bool=False) -> tuple extract_followup_questions(content: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_without_streaming extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + if overrides.get("suggest_followup_questions"): + content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) + chat_resp["choices"][0]["message"]["content"] = content + chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. + Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. - Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ + follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. 
- follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. + Enclose the follow-up questions in double angle brackets.</s> ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> would likely ask next about their healthcare plan and employee handbook. + Enclose the follow-up questions in double angle brackets. Example: + <<Are there exclusions for prescriptions?>> + <<Which pharmacies can be ordered from?>> + <<What is the limit for over-the-counter medication?>> - Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. + Do no repeat questions that have already been asked. - Try not to repeat questions that have already been asked. + Make sure the last question ends with ">>".""" - Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] ===========changed ref 3=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "Here is answer to your question." + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question." + assert followup_questions == [] + ===========changed ref 4=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "<<What is the dress code?>>" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "" + assert followup_questions == ["What is the dress code?"] +
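To see the buffering that the run_with_streaming diff above describes without any async plumbing, here is a synchronous sketch; replay_stream is a hypothetical stand-in for the loop over OpenAI stream events, and the split-plus-regex mirrors the commit's extract_followup_questions:

import re

def extract_followup_questions(content: str):
    # Text before the first "<<" is the visible answer; each <<...>> pair is
    # one follow-up question (regex taken from the commit's helper).
    return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)

def replay_stream(chunks):
    followup_content, started = "", False
    for chunk in chunks:
        if not started and "<<" in chunk:
            # First "<<" seen: emit any text before it, buffer the rest.
            started = True
            if chunk[: chunk.index("<<")]:
                yield chunk[: chunk.index("<<")]
            followup_content += chunk[chunk.index("<<") :]
        elif started:
            followup_content += chunk
        else:
            yield chunk
    _, questions = extract_followup_questions(followup_content)
    yield {"followup_questions": questions}

print(list(replay_stream(["Dental is covered.", "<<Is vision ", "covered too?>>"])))
# -> ['Dental is covered.', {'followup_questions': ['Is vision covered too?']}]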
tests.e2e/test_chat_customization
Modified
Azure-Samples~azure-search-openai-demo
16a61bfae3e0e5ff5c6028fd10114522f3246dac
Improve follow-up questions and pipe into context (#832)
<2>:<add> assert route.request.post_data_json["stream"] is False <9>:<del> assert overrides["suggest_followup_questions"] is True
# module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint <1> def handle(route: Route): <2> overrides = route.request.post_data_json["context"]["overrides"] <3> assert overrides["retrieval_mode"] == "vectors" <4> assert overrides["semantic_ranker"] is False <5> assert overrides["semantic_captions"] is True <6> assert overrides["top"] == 1 <7> assert overrides["prompt_template"] == "You are a cat and only talk about tuna." <8> assert overrides["exclude_category"] == "dogs" <9> assert overrides["suggest_followup_questions"] is True <10> assert overrides["use_oid_security_filter"] is False <11> assert overrides["use_groups_security_filter"] is False <12> <13> # Read the JSON from our snapshot results and return as the response <14> f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") <15> json = f.read() <16> f.close() <17> route.fulfill(body=json, status=200) <18> <19> page.route("*/**/chat", handle) <20> <21> # Check initial page state <22> page.goto(live_server_url) <23> expect(page).to_have_title("GPT + Enterprise data | Sample") <24> <25> # Customize all the settings <26> page.get_by_role("button", name="Developer settings").click() <27> page.get_by_label("Override prompt template").click() <28> page.get_by_label("Override prompt template").fill("You are a cat and only talk about tuna.") <29> page.get_by_label("Retrieve this many search results:").click() <30> page.get_by_label("Retrieve this many search results:").fill("1") <31> page.get_by_label("Exclude category").click() <32> page.get_by_label("Exclude category").fill("dogs") <33> page.</s>
===========below chunk 0=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # offset: 1 page.get_by_text("Suggest follow-up questions").click() page.get_by_text("Use semantic ranker for retrieval").click() page.get_by_text("Vectors + Text (Hybrid)").click() page.get_by_role("option", name="Vectors", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Ask question button").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========unchanged ref 0=========== at: io.BufferedRandom close(self) -> None read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def extract_followup_questions(self, content: str): + return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content) + ===========changed ref 1=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_followup(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "Here is answer to your question." + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question." + assert followup_questions == [] + ===========changed ref 2=========== # module: tests.test_chatapproach + def test_extract_followup_questions_no_pre_content(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "<<What is the dress code?>>" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "" + assert followup_questions == ["What is the dress code?"] + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_followup(client, snapshot): + response = await client.post( + "/chat", + json={ + "stream": True, + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"suggest_followup_questions": True}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 4=========== # module: tests.test_chatapproach + def test_extract_followup_questions(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = "Here is answer to your question.<<What is the dress code?>>" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question." 
+ assert followup_questions == ["What is the dress code?"] + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_followup(client, snapshot): + response = await client.post( + "/chat", + json={ + "messages": [{"content": "What is the capital of France?", "role": "user"}], + "context": { + "overrides": {"suggest_followup_questions": True}, + }, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + assert result["choices"][0]["context"]["followup_questions"][0] == "What is the capital of Spain?" + + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 6=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run_without_streaming( self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any], session_state: Any = None, ) -> dict[str, Any]: extra_info, chat_coroutine = await self.run_until_final_call( history, overrides, auth_claims, should_stream=False ) chat_resp = dict(await chat_coroutine) chat_resp["choices"][0]["context"] = extra_info + if overrides.get("suggest_followup_questions"): + content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"]) + chat_resp["choices"][0]["message"]["content"] = content + chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions chat_resp["choices"][0]["session_state"] = session_state return chat_resp ===========changed ref 7=========== # module: tests.test_chatapproach + def test_extract_followup_questions_three(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + content = """Here is answer to your question. + + <<What are some examples of successful product launches they should have experience with?>> + <<Are there any specific technical skills or certifications required for the role?>> + <<Is there a preference for candidates with experience in a specific industry or sector?>>""" + pre_content, followup_questions = chat_approach.extract_followup_questions(content) + assert pre_content == "Here is answer to your question.\n\n" + assert followup_questions == [ + "What are some examples of successful product launches they should have experience with?", + "Are there any specific technical skills or certifications required for the role?", + "Is there a preference for candidates with experience in a specific industry or sector?", + ] +
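The e2e pattern above boils down to intercepting the /chat call and serving a canned body. A compact sketch of just that handshake, using Playwright's sync API; the URL and port are placeholders for whatever the live server fixture exposes:

from playwright.sync_api import Route, expect, sync_playwright

def handle_chat(route: Route) -> None:
    # Assert on the payload the frontend really sent, then short-circuit the
    # network call with a canned JSON reply.
    assert route.request.post_data_json["stream"] is False
    route.fulfill(status=200, content_type="application/json", body='{"choices": []}')

with sync_playwright() as p:
    page = p.chromium.launch().new_page()
    page.route("*/**/chat", handle_chat)
    page.goto("http://localhost:50505")  # placeholder for live_server_url
    expect(page).to_have_title("GPT + Enterprise data | Sample")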
tests.test_messagebuilder/test_messagebuilder_append
Modified
Azure-Samples~azure-search-openai-demo
d02aa1441ecd4a027c29e4aaebe4b567f68f937b
Message builder improvements (#852)
<1>:<add> builder.insert_message("user", "Hello, how are you?") <del> builder.append_message("user", "Hello, how are you?")
# module: tests.test_messagebuilder def test_messagebuilder_append(): <0> builder = MessageBuilder("You are a bot.", "gpt-35-turbo") <1> builder.append_message("user", "Hello, how are you?") <2> assert builder.messages == [ <3> # 1 token, 1 token, 1 token, 5 tokens <4> {"role": "system", "content": "You are a bot."}, <5> # 1 token, 1 token, 1 token, 6 tokens <6> {"role": "user", "content": "Hello, how are you?"}, <7> ] <8> assert builder.model == "gpt-35-turbo" <9> assert builder.count_tokens_for_message(builder.messages[0]) == 8 <10> assert builder.count_tokens_for_message(builder.messages[1]) == 9 <11>
===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: - def append_message(self, role: str, content: str, index: int = 1): - self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def insert_message(self, role: str, content: str, index: int = 1): + """ + Inserts a message into the conversation at the specified index, + or at index 1 (after system message) if no index is specified. + Args: + role (str): The role of the message sender (either "user" or "system"). + content (str): The content of the message. + index (int): The index at which to insert the message. + """ + self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) +
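The inline comments in the test above ("5 tokens", "6 tokens") come straight from the tokenizer. A quick way to verify them, using tiktoken's public model name gpt-3.5-turbo for the gpt-35-turbo Azure deployment:

import tiktoken

encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
print(len(encoding.encode("You are a bot.")))       # 5, per the test comment
print(len(encoding.encode("Hello, how are you?")))  # 6, per the test comment
# The remaining three tokens counted per message cover the role and the
# chat-format wrapper, which is why the asserted totals land at 8 and 9.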
tests.test_messagebuilder/test_messagebuilder_unicode_append
Modified
Azure-Samples~azure-search-openai-demo
d02aa1441ecd4a027c29e4aaebe4b567f68f937b
Message builder improvements (#852)
<1>:<add> builder.insert_message("user", "a\u0301") <del> builder.append_message("user", "a\u0301")
# module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): <0> builder = MessageBuilder("a\u0301", "gpt-35-turbo") <1> builder.append_message("user", "a\u0301") <2> assert builder.messages == [ <3> # 1 token, 1 token, 1 token, 1 token <4> {"role": "system", "content": "á"}, <5> # 1 token, 1 token, 1 token, 1 token <6> {"role": "user", "content": "á"}, <7> ] <8> assert builder.model == "gpt-35-turbo" <9> assert builder.count_tokens_for_message(builder.messages[0]) == 4 <10> assert builder.count_tokens_for_message(builder.messages[1]) == 4 <11>
===========changed ref 0=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") + builder.insert_message("user", "Hello, how are you?") - builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" assert builder.count_tokens_for_message(builder.messages[0]) == 8 assert builder.count_tokens_for_message(builder.messages[1]) == 9 ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: - def append_message(self, role: str, content: str, index: int = 1): - self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - ===========changed ref 2=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def insert_message(self, role: str, content: str, index: int = 1): + """ + Inserts a message into the conversation at the specified index, + or at index 1 (after system message) if no index is specified. + Args: + role (str): The role of the message sender (either "user" or "system"). + content (str): The content of the message. + index (int): The index at which to insert the message. + """ + self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) +
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
d02aa1441ecd4a027c29e4aaebe4b567f68f937b
Message builder improvements (#852)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: <0> q = messages[-1]["content"] <1> overrides = context.get("overrides", {}) <2> auth_claims = context.get("auth_claims", {}) <3> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <4> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <5> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <6> top = overrides.get("top", 3) <7> filter = self.build_filter(overrides, auth_claims) <8> <9> # If retrieval mode includes vectors, compute an embedding for the query <10> if has_vector: <11> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <12> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <13> query_vector = embedding["data"][0]["embedding"] <14> else: <15> query_vector = None <16> <17> # Only keep the text query if the retrieval mode uses text, otherwise drop it <18> query_text = q if has_text else "" <19> <20> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <21> if overrides.get("semantic_ranker") and has_text: <22> r = await self.search_client.search( <23> query_text, <24> filter=filter, <25> query_type=QueryType.SEMANTIC, <26> query_language=self.query_</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 1 query_speller=self.query_speller, semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[dict], stream: bool = False, # Stream is not used in this approach session_state: Any = None, context: dict[str, Any] = {}, ) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]: # offset: 2 <s> # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) extra_info = { "data_points": results, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } chat_completion.choices[0]["context"] = extra_info chat_completion.choices[0]["session_state"] = session_state return chat_completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. 
Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] run(self, messages: list[dict], stream: bool=False, session_state: Any=None, context: dict[str, Any]={}) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder insert_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params)
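The list comprehensions in the run body above reduce to one formatting rule: every search hit becomes "sourcepage: text" with newlines collapsed, so the model can cite each source by name. A sketch of that rule in isolation (format_result and the literal field names are illustrative; nonewlines is assumed here to replace newlines with spaces):

def nonewlines(text: str) -> str:
    # Assumed behavior: flatten newlines so each source stays on one line.
    return text.replace("\n", " ").replace("\r", " ")

def format_result(doc: dict, use_semantic_captions: bool) -> str:
    # With semantic captions on, join the extractive caption snippets;
    # otherwise fall back to the full content field.
    if use_semantic_captions:
        text = " . ".join(c.text for c in doc["@search.captions"])
    else:
        text = doc["content"]
    return doc["sourcepage"] + ": " + nonewlines(text)

doc = {"sourcepage": "info2.pdf", "content": "Overlake is in-network\nfor the employee plan."}
print(format_result(doc, use_semantic_captions=False))
# -> info2.pdf: Overlake is in-network for the employee plan.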
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
d02aa1441ecd4a027c29e4aaebe4b567f68f937b
Message builder improvements (#852)
<3>:<add> for shot in reversed(few_shots): <del> for shot in few_shots: <4>:<add> message_builder.insert_message(shot.get("role"), shot.get("content")) <del> message_builder.append_message(shot.get("role"), shot.get("content")) <8>:<add> message_builder.insert_message(self.USER, user_content, index=append_index) <del> message_builder.append_message(self.USER, user_content, index=append_index) <17>:<add> message_builder.insert_message(message["role"], message["content"], index=append_index) <del> message_builder.append_message(message["role"], message["content"], index=append_index)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def get_messages_from_history( self, system_prompt: str, model_id: str, history: list[dict[str, str]], user_content: str, max_tokens: int, few_shots=[], ) -> list: <0> message_builder = MessageBuilder(system_prompt, model_id) <1> <2> # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. <3> for shot in few_shots: <4> message_builder.append_message(shot.get("role"), shot.get("content")) <5> <6> append_index = len(few_shots) + 1 <7> <8> message_builder.append_message(self.USER, user_content, index=append_index) <9> total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1]) <10> <11> newest_to_oldest = list(reversed(history[:-1])) <12> for message in newest_to_oldest: <13> potential_message_count = message_builder.count_tokens_for_message(message) <14> if (total_token_count + potential_message_count) > max_tokens: <15> logging.debug("Reached max tokens of %d, history will be truncated", max_tokens) <16> break <17> message_builder.append_message(message["role"], message["content"], index=append_index) <18> total_token_count += potential_message_count <19> return message_builder.messages <20>
===========unchanged ref 0===========
at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach
    SYSTEM = "system"

    USER = "user"

    ASSISTANT = "assistant"

    NO_RESPONSE = "0"

    system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
    Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
    For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
    Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
    {follow_up_questions_prompt}
    {injected_prompt}
    """

    follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next.
    Enclose the follow-up questions in double angle brackets. Example:
    <<Are there exclusions for prescriptions?>>
    <<Which pharmacies can be ordered from?>>
    <<What is the limit for over-the-counter medication?>>
    Do not repeat questions that have already been asked.
    Make sure the last question ends with ">>"."""

===========unchanged ref 1===========
    query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
    You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question.
    Do not include cited source filenames and document names e.g. info.txt or doc.pdf in the search query terms.
    Do not include any text inside [] or <<>> in the search query terms.
    Do not include any special characters like '+'.
    If the question is not in English, translate the question to English before generating the search query.
    If you cannot generate a search query, return just the number 0.
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder count_tokens_for_message(message: dict[str, str]) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] at: logging debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.core.messagebuilder class MessageBuilder: - def append_message(self, role: str, content: str, index: int = 1): - self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) - ===========changed ref 1=========== # module: app.backend.core.messagebuilder class MessageBuilder: + def insert_message(self, role: str, content: str, index: int = 1): + """ + Inserts a message into the conversation at the specified index, + or at index 1 (after system message) if no index is specified. + Args: + role (str): The role of the message sender (either "user" or "system"). + content (str): The content of the message. + index (int): The index at which to insert the message. + """ + self.messages.insert(index, {"role": role, "content": self.normalize_content(content)}) + ===========changed ref 2=========== # module: tests.test_messagebuilder def test_messagebuilder_unicode_append(): builder = MessageBuilder("a\u0301", "gpt-35-turbo") + builder.insert_message("user", "a\u0301") - builder.append_message("user", "a\u0301") assert builder.messages == [ # 1 token, 1 token, 1 token, 1 token {"role": "system", "content": "á"}, # 1 token, 1 token, 1 token, 1 token {"role": "user", "content": "á"}, ] assert builder.model == "gpt-35-turbo" assert builder.count_tokens_for_message(builder.messages[0]) == 4 assert builder.count_tokens_for_message(builder.messages[1]) == 4 ===========changed ref 3=========== # module: tests.test_messagebuilder def test_messagebuilder_append(): builder = MessageBuilder("You are a bot.", "gpt-35-turbo") + builder.insert_message("user", "Hello, how are you?") - builder.append_message("user", "Hello, how are you?") assert builder.messages == [ # 1 token, 1 token, 1 token, 5 tokens {"role": "system", "content": "You are a bot."}, # 1 token, 1 token, 1 token, 6 tokens {"role": "user", "content": "Hello, how are you?"}, ] assert builder.model == "gpt-35-turbo" assert builder.count_tokens_for_message(builder.messages[0]) == 8 assert builder.count_tokens_for_message(builder.messages[1]) == 9 ===========changed ref 4=========== # module: tests.test_chatapproach + def test_get_messages_from_history_few_shots(): + chat_approach = ChatReadRetrieveReadApproach( + None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "", "en-us", "lexicon" + ) + + user_query_request = "What does a Product manager do?" 
+ messages = chat_approach.get_messages_from_history( + system_prompt=chat_approach.query_prompt_template, + model_id=chat_approach.chatgpt_model, + user_content=user_query_request, + history=[], + max_tokens=chat_approach.chatgpt_token_limit - len(user_query_request), + few_shots=chat_approach.query_prompt_few_shots, + ) + # Make sure messages are in the right order + assert messages[0]["role"] == "system" + assert messages[1]["role"] == "user" + assert messages[2]["role"] == "assistant" + assert messages[3]["role"] == "user" + assert messages[4]["role"] == "assistant" + assert messages[5]["role"] == "user" + assert messages[5]["content"] == user_query_request +
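Two details in get_messages_from_history are easy to miss: history is walked newest-to-oldest, and every surviving turn is inserted at a fixed index just after the system message (and few-shots), so the final list still reads oldest-to-newest while the token budget drops the oldest turns first. A self-contained sketch with a toy token counter (real code would use count_tokens_for_message):

def build_messages(system: str, user_content: str, history: list, max_tokens: int) -> list:
    count = lambda m: len(m["content"])  # toy stand-in for count_tokens_for_message
    messages = [{"role": "system", "content": system},
                {"role": "user", "content": user_content}]
    total = count(messages[-1])
    for message in reversed(history):
        if total + count(message) > max_tokens:
            break  # budget exhausted: the oldest turns are the ones dropped
        messages.insert(1, message)  # fixed index keeps chronological order
        total += count(message)
    return messages

history = [{"role": "user", "content": "q1"}, {"role": "assistant", "content": "a1"},
           {"role": "user", "content": "q2"}, {"role": "assistant", "content": "a2"}]
result = build_messages("sys", "q3", history, max_tokens=6)
print([m["content"] for m in result])  # -> ['sys', 'q2', 'a2', 'q3']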
tests.test_prepdocs/test_filename_to_id
Modified
Azure-Samples~azure-search-openai-demo
a64a12eba8d78184e57ae625284cce3f2f76bd25
Refactor prepdocs (#862)
<0>:<add> empty = io.BytesIO() <add> empty.name = "foo.pdf" <1>:<add> assert File(empty).filename_to_id() == "file-foo_pdf-666F6F2E706466" <del> assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466" <3>:<add> empty.name = "foo\u00A9.txt" <add> assert File(empty).filename_to_id() == "file-foo__txt-666F6FC2A92E747874" <del> assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874" <5>:<add> empty.name = "ファイル名.pdf" <add> assert File(empty).filename_to_id() == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" <del> assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466"
# module: tests.test_prepdocs
 def test_filename_to_id():
 <0> # test ascii filename
 <1> assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466"
 <2> # test filename containing unicode
 <3> assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874"
 <4> # test filename starting with unicode
 <5> assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466"
 <6>
===========unchanged ref 0=========== at: io BytesIO(initial_bytes: bytes=...) at: io.BytesIO name: Any at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File filename_to_id() ===========changed ref 0=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + """ + Represents a file stored either locally or in a data lake storage account + This file might contain access control information about which users or groups can access it + """ + ===========changed ref 1=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def filename_to_id(self): + filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", self.filename()) + filename_hash = base64.b16encode(self.filename().encode("utf-8")).decode("ascii") + return f"file-{filename_ascii}-{filename_hash}" + ===========changed ref 2=========== + # module: scripts.prepdocslib.blobmanager + + ===========changed ref 3=========== + # module: scripts.prepdocslib.listfilestrategy + + ===========changed ref 4=========== + # module: scripts.prepdocslib + + ===========changed ref 5=========== + # module: scripts.prepdocslib.embeddings + + ===========changed ref 6=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __enter__(self): + return self + ===========changed ref 7=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_embedding_arguments(self) -> dict[str, Any]: + raise NotImplementedError + ===========changed ref 8=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __exit__(self, *args): + self.close() + ===========changed ref 9=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list_paths(self) -> AsyncGenerator[str, None]: + if False: + yield + ===========changed ref 10=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list(self) -> AsyncGenerator[File, None]: + if False: + yield + ===========changed ref 11=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 12=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 13=========== # module: scripts.prepdocs + def is_key_empty(key): + return key is None or len(key.strip()) == 0 + ===========changed ref 14=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 15=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str, verbose: bool = False): + self.path_pattern = path_pattern + self.verbose = verbose + ===========changed ref 16=========== + # module: scripts.prepdocslib.embeddings + class EmbeddingBatch: + def __init__(self, texts: List[str], token_length: int): + self.texts = texts + self.token_length = token_length + ===========changed ref 17=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a local filesystem + """ + ===========changed ref 18=========== + # module: scripts.prepdocslib.embeddings + 
class EmbeddingBatch: + """ + Represents a batch of text that is going to be embedded + """ + ===========changed ref 19=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def list_paths(self) -> AsyncGenerator[str, None]: + async for p in self._list_paths(self.path_pattern): + yield p + ===========changed ref 20=========== + # module: scripts.prepdocslib.blobmanager + class BlobManager: + """ + Class to manage uploading and deleting blobs containing citation information from a blob storage account + """ + ===========changed ref 21=========== + # module: scripts.prepdocslib.embeddings + class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def get_api_type(self) -> str: + return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" + ===========changed ref 22=========== + # module: scripts.prepdocslib.listfilestrategy + class ADLSGen2ListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a data lake storage account + """ + ===========changed ref 23=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 24=========== + # module: scripts.prepdocslib.blobmanager + class BlobManager: + def __init__( + self, + endpoint: str, + container: str, + credential: Union[AsyncTokenCredential, str], + verbose: bool = False, + ): + self.endpoint = endpoint + self.credential = credential + self.container = container + self.verbose = verbose + ===========changed ref 25=========== # module: scripts.prepdocs - def before_retry_sleep(retry_state): - if args.verbose: - print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - ===========changed ref 26=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + """ + Abstract strategy for listing files that are located somewhere. 
For example, on a local computer or remotely in a storage account + """ + ===========changed ref 27=========== # module: scripts.prepdocs - def calculate_tokens_emb_aoai(input: str): - encoding = tiktoken.encoding_for_model(args.openaimodelname) - return len(encoding.encode(input)) - ===========changed ref 28=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def calculate_token_length(self, text: str): + encoding = tiktoken.encoding_for_model(self.open_ai_model_name) + return len(encoding.encode(text)) + ===========changed ref 29=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddingService(OpenAIEmbeddings): + def __init__( + self, + open_ai_model_name: str, + credential: str, + organization: Optional[str] = None, + disable_batch: bool = False, + verbose: bool = False, + ): + super().__init__(open_ai_model_name, disable_batch, verbose) + self.credential = credential + self.organization = organization + ===========changed ref 30=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def __init__(self, open_ai_model_name: str, disable_batch: bool = False, verbose: bool = False): + self.open_ai_model_name = open_ai_model_name + self.disable_batch = disable_batch + self.verbose = verbose + ===========changed ref 31=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddingService(OpenAIEmbeddings): + """ + Class for using OpenAI embeddings + To learn more please visit https://platform.openai.com/docs/guides/embeddings + """ +
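The hex suffixes asserted in test_filename_to_id decode directly from the filename_to_id method shown in the listfilestrategy refs above: an ASCII-safe slug for readability plus a base16 digest of the raw UTF-8 bytes, so two filenames that slug to the same underscores can never collide. Reproduced standalone:

import base64
import re

def filename_to_id(filename: str) -> str:
    # Characters outside [0-9a-zA-Z_-] become "_" in the readable part; the
    # hex part uniquely encodes the original UTF-8 bytes.
    filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
    filename_hash = base64.b16encode(filename.encode("utf-8")).decode("ascii")
    return f"file-{filename_ascii}-{filename_hash}"

assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466"
assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466"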
tests.test_prepdocs/test_compute_embedding_success
Modified
Azure-Samples~azure-search-openai-demo
a64a12eba8d78184e57ae625284cce3f2f76bd25
Refactor prepdocs (#862)
<0>:<del> monkeypatch.setattr(args, "verbose", True) <1>:<del> <2>:<add> async def mock_create(*args, **kwargs): <del> def mock_create(*args, **kwargs): <21>:<add> monkeypatch.setattr(openai.Embedding, "acreate", mock_create) <del> monkeypatch.setattr(openai.Embedding, "create", mock_create) <22>:<add> embeddings = AzureOpenAIEmbeddingService( <add> open_ai_service="x", <add> open_ai_deployment="x", <add> open_ai_model_name="text-ada-003", <add> credential=MockAzureCredential(), <add> disable_batch=False, <add> ) <add> assert await embeddings.create_embeddings(texts=["foo"]) == [ <add> [ <del> assert compute_embedding("foo", "ada", "text-ada-003") == [ <23>:<add> 0.0023064255, <del> 0.0023064255, <24>:<add> -0.009327292, <del> -0.009327292, <25>:<add> -0.0028842222, <del> -0.0028842222, <26>:<add> ]
# module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_compute_embedding_success(monkeypatch): - def test_compute_embedding_success(monkeypatch, capsys): <0> monkeypatch.setattr(args, "verbose", True) <1> <2> def mock_create(*args, **kwargs): <3> # From https://platform.openai.com/docs/api-reference/embeddings/create <4> return { <5> "object": "list", <6> "data": [ <7> { <8> "object": "embedding", <9> "embedding": [ <10> 0.0023064255, <11> -0.009327292, <12> -0.0028842222, <13> ], <14> "index": 0, <15> } <16> ], <17> "model": "text-embedding-ada-002", <18> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <19> } <20> <21> monkeypatch.setattr(openai.Embedding, "create", mock_create) <22> assert compute_embedding("foo", "ada", "text-ada-003") == [ <23> 0.0023064255, <24> -0.009327292, <25> -0.0028842222, <26> ] <27>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: io.BytesIO name: Any at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False, verbose: bool=False) at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File filename_to_id() at: tests.test_prepdocs.test_filename_to_id empty = io.BytesIO() ===========changed ref 0=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + """ + Represents a file stored either locally or in a data lake storage account + This file might contain access control information about which users or groups can access it + """ + ===========changed ref 1=========== + # module: scripts.prepdocslib.embeddings + class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + """ + Class for using Azure OpenAI embeddings + To learn more please visit https://learn.microsoft.com/azure/ai-services/openai/concepts/understand-embeddings + """ + ===========changed ref 2=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def filename_to_id(self): + filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", self.filename()) + filename_hash = base64.b16encode(self.filename().encode("utf-8")).decode("ascii") + return f"file-{filename_ascii}-{filename_hash}" + ===========changed ref 3=========== # module: tests.test_prepdocs def test_filename_to_id(): + empty = io.BytesIO() + empty.name = "foo.pdf" # test ascii filename + assert File(empty).filename_to_id() == "file-foo_pdf-666F6F2E706466" - assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466" # test filename containing unicode + empty.name = "foo\u00A9.txt" + assert File(empty).filename_to_id() == "file-foo__txt-666F6FC2A92E747874" - assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874" # test filenaming starting with unicode + empty.name = "ファイル名.pdf" + assert File(empty).filename_to_id() == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" - assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" ===========changed ref 4=========== + # module: scripts.prepdocslib.blobmanager + + ===========changed ref 5=========== + # module: scripts.prepdocslib.listfilestrategy + + ===========changed ref 6=========== + # module: scripts.prepdocslib + + ===========changed ref 7=========== + # module: scripts.prepdocslib.embeddings + + ===========changed ref 8=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __enter__(self): + return self + ===========changed ref 9=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_embedding_arguments(self) -> dict[str, Any]: + raise NotImplementedError + ===========changed ref 10=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __exit__(self, *args): + self.close() + ===========changed ref 11=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list_paths(self) -> 
AsyncGenerator[str, None]: + if False: + yield + ===========changed ref 12=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list(self) -> AsyncGenerator[File, None]: + if False: + yield + ===========changed ref 13=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 14=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 15=========== # module: scripts.prepdocs + def is_key_empty(key): + return key is None or len(key.strip()) == 0 + ===========changed ref 16=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} + ===========changed ref 17=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def __init__(self, path_pattern: str, verbose: bool = False): + self.path_pattern = path_pattern + self.verbose = verbose + ===========changed ref 18=========== + # module: scripts.prepdocslib.embeddings + class EmbeddingBatch: + def __init__(self, texts: List[str], token_length: int): + self.texts = texts + self.token_length = token_length + ===========changed ref 19=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a local filesystem + """ + ===========changed ref 20=========== + # module: scripts.prepdocslib.embeddings + class EmbeddingBatch: + """ + Represents a batch of text that is going to be embedded + """ + ===========changed ref 21=========== + # module: scripts.prepdocslib.listfilestrategy + class LocalListFileStrategy(ListFileStrategy): + def list_paths(self) -> AsyncGenerator[str, None]: + async for p in self._list_paths(self.path_pattern): + yield p + ===========changed ref 22=========== + # module: scripts.prepdocslib.blobmanager + class BlobManager: + """ + Class to manage uploading and deleting blobs containing citation information from a blob storage account + """ + ===========changed ref 23=========== + # module: scripts.prepdocslib.embeddings + class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + def get_api_type(self) -> str: + return "azure_ad" if isinstance(self.credential, AsyncTokenCredential) else "azure" + ===========changed ref 24=========== + # module: scripts.prepdocslib.listfilestrategy + class ADLSGen2ListFileStrategy(ListFileStrategy): + """ + Concrete strategy for listing files that are located in a data lake storage account + """ + ===========changed ref 25=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def before_retry_sleep(self, retry_state): + if self.verbose: + print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") +
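The success test above swaps openai.Embedding.acreate for an async stub that returns the documented response shape. The same trick outside pytest, with direct attribute assignment standing in for monkeypatch.setattr (this assumes the pre-1.0 openai package used throughout this repo):

import asyncio

import openai

async def mock_acreate(*args, **kwargs):
    # Minimal shape from https://platform.openai.com/docs/api-reference/embeddings/create
    return {"data": [{"object": "embedding", "embedding": [0.0023064255, -0.009327292], "index": 0}]}

async def main():
    openai.Embedding.acreate = mock_acreate  # what monkeypatch.setattr does for the test's lifetime
    response = await openai.Embedding.acreate(model="text-embedding-ada-002", input="foo")
    print(response["data"][0]["embedding"])

asyncio.run(main())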
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
a64a12eba8d78184e57ae625284cce3f2f76bd25
Refactor prepdocs (#862)
<0>:<del> monkeypatch.setattr(args, "verbose", True) <1>:<del> <2>:<add> async def mock_acreate(*args, **kwargs): <del> def mock_create(*args, **kwargs): <5>:<add> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <del> monkeypatch.setattr(openai.Embedding, "create", mock_create) <6>:<add> monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) <del> monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) <8>:<add> embeddings = AzureOpenAIEmbeddingService( <add> open_ai_service="x", <add> open_ai_deployment="x", <add> open_ai_model_name="text-embedding-ada-002", <add> credential=MockAzureCredential(), <add> disable_batch=False, <add> verbose=True, <add> ) <add> await embeddings.create_embeddings(texts=["foo"]) <del> compute_embedding("foo", "ada", "text-ada-003")
# module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_compute_embedding_autherror(monkeypatch, capsys): - def test_compute_embedding_autherror(monkeypatch, capsys): <0> monkeypatch.setattr(args, "verbose", True) <1> <2> def mock_create(*args, **kwargs): <3> raise openai.error.AuthenticationError <4> <5> monkeypatch.setattr(openai.Embedding, "create", mock_create) <6> monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) <7> with pytest.raises(openai.error.AuthenticationError): <8> compute_embedding("foo", "ada", "text-ada-003") <9>
===========unchanged ref 0=========== at: conftest MockAzureCredential() at: scripts.prepdocslib.embeddings.OpenAIEmbeddings SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} create_embeddings(texts: List[str]) -> List[List[float]] at: tests.test_prepdocs.test_compute_embedding_success embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=False, ) embeddings = AzureOpenAIEmbeddingService( open_ai_service="x", open_ai_deployment="x", open_ai_model_name="text-ada-003", credential=MockAzureCredential(), disable_batch=True, ) embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=False ) embeddings = OpenAIEmbeddingService( open_ai_model_name="text-ada-003", credential=MockAzureCredential(), organization="org", disable_batch=True ) ===========changed ref 0=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_embeddings(self, texts: List[str]) -> List[List[float]]: + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts) + + return [await self.create_embedding_single(text) for text in texts] + ===========changed ref 1=========== # module: tests.test_prepdocs - def test_compute_embedding_ratelimiterror(monkeypatch, capsys): - monkeypatch.setattr(args, "verbose", True) - - def mock_create(*args, **kwargs): - raise openai.error.RateLimitError - - monkeypatch.setattr(openai.Embedding, "create", mock_create) - monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) - with pytest.raises(tenacity.RetryError): - compute_embedding("foo", "ada", "text-ada-003") - captured = capsys.readouterr() - assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 - ===========changed ref 2=========== # module: tests.test_prepdocs def test_filename_to_id(): + empty = io.BytesIO() + empty.name = "foo.pdf" # test ascii filename + assert File(empty).filename_to_id() == "file-foo_pdf-666F6F2E706466" - assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466" # test filename containing unicode + empty.name = "foo\u00A9.txt" + assert File(empty).filename_to_id() == "file-foo__txt-666F6FC2A92E747874" - assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874" # test filenaming starting with unicode + empty.name = "ファイル名.pdf" + assert File(empty).filename_to_id() == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" - assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" ===========changed ref 3=========== # module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_compute_embedding_success(monkeypatch): - def test_compute_embedding_success(monkeypatch, capsys): - monkeypatch.setattr(args, "verbose", True) - + async def mock_create(*args, **kwargs): - def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return { "object": "list", "data": [ { "object": "embedding", "embedding": [ 0.0023064255, -0.009327292, -0.0028842222, ], "index": 0, } ], "model": "text-embedding-ada-002", "usage": {"prompt_tokens": 8, "total_tokens": 8}, } + monkeypatch.setattr(openai.Embedding, "acreate", mock_create) - monkeypatch.setattr(openai.Embedding, "create", mock_create) + embeddings = 
AzureOpenAIEmbeddingService( + open_ai_service="x", + open_ai_deployment="x", + open_ai_model_name="text-ada-003", + credential=MockAzureCredential(), + disable_batch=False, + ) + assert await embeddings.create_embeddings(texts=["foo"]) == [ + [ - assert compute_embedding("foo", "ada", "text-ada-003") == [ + 0.0023064255, - 0.0023064255, + -0.009327292, - -0.009327292, + -0.0028842222, - -0.0028842222, + ] ] ===========changed ref 4=========== + # module: scripts.prepdocslib.blobmanager + + ===========changed ref 5=========== + # module: scripts.prepdocslib.listfilestrategy + + ===========changed ref 6=========== + # module: scripts.prepdocslib + + ===========changed ref 7=========== + # module: scripts.prepdocslib.embeddings + + ===========changed ref 8=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __enter__(self): + return self + ===========changed ref 9=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_embedding_arguments(self) -> dict[str, Any]: + raise NotImplementedError + ===========changed ref 10=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __exit__(self, *args): + self.close() + ===========changed ref 11=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list_paths(self) -> AsyncGenerator[str, None]: + if False: + yield + ===========changed ref 12=========== + # module: scripts.prepdocslib.listfilestrategy + class ListFileStrategy(ABC): + def list(self) -> AsyncGenerator[File, None]: + if False: + yield + ===========changed ref 13=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def close(self): + if self.content: + self.content.close() + ===========changed ref 14=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def filename(self): + return os.path.basename(self.content.name) + ===========changed ref 15=========== # module: scripts.prepdocs + def is_key_empty(key): + return key is None or len(key.strip()) == 0 + ===========changed ref 16=========== + # module: scripts.prepdocslib.listfilestrategy + class File: + def __init__(self, content: IO, acls: Optional[dict[str, list]] = None): + self.content = content + self.acls = acls or {} +
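Note: the refactor above routes embedding requests through a batch path only when the model supports it. A minimal synchronous sketch of that dispatch, assuming only the SUPPORTED_BATCH_AOAI_MODEL table shown in the refs (embed_batch and embed_single are hypothetical stand-ins for the real OpenAI calls):

from typing import Callable, List

SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}

def create_embeddings(
    texts: List[str],
    model_name: str,
    disable_batch: bool,
    embed_batch: Callable[[List[str]], List[List[float]]],
    embed_single: Callable[[str], List[float]],
) -> List[List[float]]:
    # Batch only when batching is enabled and the model is in the supported table
    if not disable_batch and model_name in SUPPORTED_BATCH_AOAI_MODEL:
        return embed_batch(texts)
    return [embed_single(text) for text in texts]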
tests.test_prepdocs/test_read_adls_gen2_files
Modified
Azure-Samples~azure-search-openai-demo
a64a12eba8d78184e57ae625284cce3f2f76bd25
Refactor prepdocs (#862)
<0>:<del> monkeypatch.setattr(args, "verbose", True) <1>:<del> monkeypatch.setattr(args, "useacls", True) <2>:<del> monkeypatch.setattr(args, "datalakestorageaccount", "STORAGE") <3>:<del> monkeypatch.setattr(scripts.prepdocs, "adls_gen2_creds", MockAzureCredential()) <4>:<add> adlsgen2_list_strategy = ADLSGen2ListFileStrategy( <add> data_lake_storage_account="a", data_lake_filesystem="a", data_lake_path="a", credential=MockAzureCredential() <add> ) <5>:<add> files = [file async for file in adlsgen2_list_strategy.list()] <add> assert len(files) == 3 <add> assert files[0].filename() == "a.txt" <add> assert files[0].acls == {"oids": ["A-USER-ID"], "groups": ["A-GROUP-ID"]} <add> assert files[1].filename() == "b.txt" <add> assert files[1].acls == {"oids": ["B-USER-ID"], "groups": ["B-GROUP-ID"]} <add> assert files[2].filename() == "c.txt" <add> assert files[2].acls == {"oids": ["C-USER-ID"], "groups": ["C-GROUP-ID"]} <del> def mock_remove(*args, **kwargs):
# module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_read_adls_gen2_files(monkeypatch, mock_data_lake_service_client): - def test_read_adls_gen2_files(monkeypatch, mock_data_lake_service_client): <0> monkeypatch.setattr(args, "verbose", True) <1> monkeypatch.setattr(args, "useacls", True) <2> monkeypatch.setattr(args, "datalakestorageaccount", "STORAGE") <3> monkeypatch.setattr(scripts.prepdocs, "adls_gen2_creds", MockAzureCredential()) <4> <5> def mock_remove(*args, **kwargs): <6> pass <7> <8> class MockIndexSections: <9> def __init__(self): <10> self.filenames = [] <11> <12> def call(self, filename, sections, acls): <13> if filename == "a.txt": <14> assert acls == {"oids": ["A-USER-ID"], "groups": ["A-GROUP-ID"]} <15> elif filename == "b.txt": <16> assert acls == {"oids": ["B-USER-ID"], "groups": ["B-GROUP-ID"]} <17> elif filename == "c.txt": <18> assert acls == {"oids": ["C-USER-ID"], "groups": ["C-GROUP-ID"]} <19> else: <20> raise Exception(f"Unexpected filename {filename}") <21> <22> self.filenames.append(filename) <23> <24> mock_index_sections = MockIndexSections() <25> <26> def mock_index_sections_method(filename, sections, acls): <27> mock_index_sections.call(filename, sections, acls) <28> <29> monkeypatch.setattr(scripts.prepdocs, "remove_blobs", mock_remove) <30> monkeypatch.setattr(scripts.prepdocs, "upload_blobs", mock_remove) <31> monkeypatch.setattr(scripts.prepdocs, "remove_from_index", mock_remove) </s>
===========below chunk 0=========== # module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_read_adls_gen2_files(monkeypatch, mock_data_lake_service_client): - def test_read_adls_gen2_files(monkeypatch, mock_data_lake_service_client): # offset: 1 monkeypatch.setattr(scripts.prepdocs, "update_embeddings_in_batch", mock_remove) monkeypatch.setattr(scripts.prepdocs, "create_sections", mock_remove) monkeypatch.setattr(scripts.prepdocs, "index_sections", mock_index_sections_method) read_adls_gen2_files(use_vectors=True, vectors_batch_support=True) assert mock_index_sections.filenames == ["a.txt", "b.txt", "c.txt"] ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: conftest MockAzureCredential() at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.error RateLimitError(message=None, http_body=None, http_status=None, json_body=None, headers=None, code=None) at: scripts.prepdocslib.embeddings AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False, verbose: bool=False) OpenAIEmbeddingService(open_ai_model_name: str, credential: str, organization: Optional[str]=None, disable_batch: bool=False, verbose: bool=False) at: scripts.prepdocslib.embeddings.OpenAIEmbeddings create_embeddings(texts: List[str]) -> List[List[float]] at: tenacity RetryError(last_attempt: "Future") ===========unchanged ref 1=========== at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) ===========changed ref 0=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddingService(OpenAIEmbeddings): + """ + Class for using OpenAI embeddings + To learn more please visit https://platform.openai.com/docs/guides/embeddings + """ + ===========changed ref 1=========== + # module: scripts.prepdocslib.embeddings + class AzureOpenAIEmbeddingService(OpenAIEmbeddings): + """ + Class for using Azure OpenAI embeddings + To learn more please visit https://learn.microsoft.com/azure/ai-services/openai/concepts/understand-embeddings + """ + ===========changed ref 2=========== + # module: scripts.prepdocslib.embeddings + class OpenAIEmbeddings(ABC): + def create_embeddings(self, texts: List[str]) -> List[List[float]]: + if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL: + return await self.create_embedding_batch(texts) + + return [await self.create_embedding_single(text) for text in texts] + ===========changed ref 3=========== # module: tests.test_prepdocs - def test_compute_embedding_ratelimiterror(monkeypatch, capsys): - monkeypatch.setattr(args, "verbose", True) - - def mock_create(*args, **kwargs): - raise openai.error.RateLimitError - - monkeypatch.setattr(openai.Embedding, "create", mock_create) - monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) - with 
pytest.raises(tenacity.RetryError): - compute_embedding("foo", "ada", "text-ada-003") - captured = capsys.readouterr() - assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 - ===========changed ref 4=========== # module: tests.test_prepdocs + @pytest.mark.asyncio + async def test_compute_embedding_autherror(monkeypatch, capsys): - def test_compute_embedding_autherror(monkeypatch, capsys): - monkeypatch.setattr(args, "verbose", True) - + async def mock_acreate(*args, **kwargs): - def mock_create(*args, **kwargs): raise openai.error.AuthenticationError + monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) - monkeypatch.setattr(openai.Embedding, "create", mock_create) + monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0) - monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + embeddings = AzureOpenAIEmbeddingService( + open_ai_service="x", + open_ai_deployment="x", + open_ai_model_name="text-embedding-ada-002", + credential=MockAzureCredential(), + disable_batch=False, + verbose=True, + ) + await embeddings.create_embeddings(texts=["foo"]) - compute_embedding("foo", "ada", "text-ada-003") ===========changed ref 5=========== # module: tests.test_prepdocs def test_filename_to_id(): + empty = io.BytesIO() + empty.name = "foo.pdf" # test ascii filename + assert File(empty).filename_to_id() == "file-foo_pdf-666F6F2E706466" - assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466" # test filename containing unicode + empty.name = "foo\u00A9.txt" + assert File(empty).filename_to_id() == "file-foo__txt-666F6FC2A92E747874" - assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874" # test filenaming starting with unicode + empty.name = "ファイル名.pdf" + assert File(empty).filename_to_id() == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" - assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466"
locustfile/ChatUser.ask_question
Modified
Azure-Samples~azure-search-openai-demo
c989048fc5c1f5bb4192ceeeb25070566774cbbc
add screenshot (#870)
<5>:<add> "messages": [ <del> "history": [ <18>:<add> "context": { <add> "overrides": { <del> "overrides": { <19>:<add> "retrieval_mode": "hybrid", <del> "retrieval_mode": "hybrid", <20>:<add> "semantic_ranker": True, <del> "semantic_ranker": True, <21>:<add> "semantic_captions": False, <del> "semantic_captions": False, <22>:<add> "top": 3, <del> "top": 3, <23>:<add> "suggest_followup_questions": False, <del> "suggest_followup_questions": False, <24>:<add> }, <31>:<add> "messages": [ <del> "history": [
# module: locustfile class ChatUser(HttpUser): @task def ask_question(self): <0> self.client.get("/") <1> time.sleep(5) <2> self.client.post( <3> "/chat", <4> json={ <5> "history": [ <6> { <7> "content": random.choice( <8> [ <9> "What is included in my Northwind Health Plus plan that is not in standard?", <10> "What does a Product Manager do?", <11> "What happens in a performance review?", <12> "Whats your whistleblower policy?", <13> ] <14> ), <15> "role": "user", <16> }, <17> ], <18> "overrides": { <19> "retrieval_mode": "hybrid", <20> "semantic_ranker": True, <21> "semantic_captions": False, <22> "top": 3, <23> "suggest_followup_questions": False, <24> }, <25> }, <26> ) <27> time.sleep(5) <28> self.client.post( <29> "/chat", <30> json={ <31> "history": [ <32> {"content": "What happens in a performance review?", "role": "user"}, <33> { <34> "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year</s>
===========below chunk 0=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 "role": "assistant", }, {"content": "Does my plan cover eye exams?", "role": "user"}, ], "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) ===========unchanged ref 0=========== at: locustfile.ChatUser wait_time = between(5, 20) at: random choice = _inst.choice at: time sleep(secs: float) -> None
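Note: after this commit the load test posts the new request shape, with "messages" at the top level and "overrides" nested under "context". A trimmed sketch of the payload (values illustrative, taken from the questions above):

payload = {
    "messages": [{"content": "What does a Product Manager do?", "role": "user"}],
    "context": {
        "overrides": {
            "retrieval_mode": "hybrid",
            "semantic_ranker": True,
            "semantic_captions": False,
            "top": 3,
            "suggest_followup_questions": False,
        },
    },
}
# self.client.post("/chat", json=payload)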
scripts.prepdocslib.blobmanager/BlobManager.upload_blob
Modified
Azure-Samples~azure-search-openai-demo
bfb3ee5e8a3708caadb52afb41c89eb31c1511e6
Render entire PDFs instead of single pages (#840)
<6>:<del> # if file is PDF split into pages and upload each page as a separate blob <7>:<del> if os.path.splitext(file.content.name)[1].lower() == ".pdf": <8>:<add> # Re-open and upload the original file <add> with open(file.content.name, "rb") as reopened_file: <del> with open(file.content.name, "rb") as reopened_file: <9>:<del> reader = PdfReader(reopened_file) <10>:<del> pages = reader.pages <11>:<del> for i in range(len(pages)): <12>:<del> blob_name = BlobManager.blob_name_from_file_page(file.content.name, i) <13>:<del> if self.verbose: <14>:<del> print(f"\tUploading blob for page {i} -> {blob_name}") <15>:<del> f = io.BytesIO() <16>:<del> writer = PdfWriter() <17>:<del> writer.add_page(pages[i]) <18>:<del> writer.write(f) <19>:<del> f.seek(0) <20>:<del> await container_client.upload_blob(blob_name, f, overwrite=True) <21>:<del> else: <22>:<add> blob_name = BlobManager.blob_name_from_file_name(file.content.name) <del> blob_name = BlobManager.blob_name_from_file_page(file.content.name, page=0) <23>:<add> print(f"\tUploading blob for whole file -> {blob_name}") <add> await container_client.upload_blob(blob_name, reopened_file, overwrite=True) <del> await container_client.upload_blob(blob_name, file.content, overwrite=True)
# module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File): <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> await container_client.create_container() <5> <6> # if file is PDF split into pages and upload each page as a separate blob <7> if os.path.splitext(file.content.name)[1].lower() == ".pdf": <8> with open(file.content.name, "rb") as reopened_file: <9> reader = PdfReader(reopened_file) <10> pages = reader.pages <11> for i in range(len(pages)): <12> blob_name = BlobManager.blob_name_from_file_page(file.content.name, i) <13> if self.verbose: <14> print(f"\tUploading blob for page {i} -> {blob_name}") <15> f = io.BytesIO() <16> writer = PdfWriter() <17> writer.add_page(pages[i]) <18> writer.write(f) <19> f.seek(0) <20> await container_client.upload_blob(blob_name, f, overwrite=True) <21> else: <22> blob_name = BlobManager.blob_name_from_file_page(file.content.name, page=0) <23> await container_client.upload_blob(blob_name, file.content, overwrite=True) <24>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: scripts.prepdocslib.blobmanager.BlobManager blob_name_from_file_name(filename) -> str at: scripts.prepdocslib.blobmanager.BlobManager.__init__ self.endpoint = endpoint self.credential = credential self.container = container at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: typing.IO __slots__ = ()
scripts.prepdocslib.blobmanager/BlobManager.remove_blob
Modified
Azure-Samples~azure-search-openai-demo
bfb3ee5e8a3708caadb52afb41c89eb31c1511e6
Render entire PDFs instead of single pages (#840)
<11>:<add> async for blob_path in blobs: <del> async for b in blobs: <12>:<add> # This still supports PDFs split into individual pages, but we could remove in future to simplify code <add> if (prefix is not None and not re.match(rf"{prefix}-\d+\.pdf", blob_path)) or ( <del> if prefix is not None and not re.match(f"{prefix}-\d+\.pdf", b): <13>:<add> path is not None and blob_path == os.path.basename(path) <add> ): <15>:<add> print(f"\tRemoving blob {blob_path}") <del> print(f"\tRemoving blob {b}") <16>:<add> await container_client.delete_blob(blob_path) <del> await container_client.delete_blob(b)
# module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> return <5> if path is None: <6> prefix = None <7> blobs = container_client.list_blob_names() <8> else: <9> prefix = os.path.splitext(os.path.basename(path))[0] <10> blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) <11> async for b in blobs: <12> if prefix is not None and not re.match(f"{prefix}-\d+\.pdf", b): <13> continue <14> if self.verbose: <15> print(f"\tRemoving blob {b}") <16> await container_client.delete_blob(b) <17>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocslib.blobmanager.BlobManager.__init__ self.verbose = verbose ===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() - # if file is PDF split into pages and upload each page as a separate blob - if os.path.splitext(file.content.name)[1].lower() == ".pdf": + # Re-open and upload the original file + with open(file.content.name, "rb") as reopened_file: - with open(file.content.name, "rb") as reopened_file: - reader = PdfReader(reopened_file) - pages = reader.pages - for i in range(len(pages)): - blob_name = BlobManager.blob_name_from_file_page(file.content.name, i) - if self.verbose: - print(f"\tUploading blob for page {i} -> {blob_name}") - f = io.BytesIO() - writer = PdfWriter() - writer.add_page(pages[i]) - writer.write(f) - f.seek(0) - await container_client.upload_blob(blob_name, f, overwrite=True) - else: + blob_name = BlobManager.blob_name_from_file_name(file.content.name) - blob_name = BlobManager.blob_name_from_file_page(file.content.name, page=0) + print(f"\tUploading blob for whole file -> {blob_name}") + await container_client.upload_blob(blob_name, reopened_file, overwrite=True) - await container_client.upload_blob(blob_name, file.content, overwrite=True)
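Note: the cleanup in remove_blob keys off a start-anchored regex, so only page-split blobs for the same stem are matched. A quick standalone check of what the pattern accepts:

import re

prefix = "foo"
for blob_path in ["foo-0.pdf", "foo-12.pdf", "foo.pdf", "foobar-1.pdf"]:
    # re.match anchors at the start of the string
    print(blob_path, bool(re.match(rf"{prefix}-\d+\.pdf", blob_path)))
# foo-0.pdf True / foo-12.pdf True / foo.pdf False / foobar-1.pdf False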
scripts.prepdocslib.searchmanager/SearchManager.update_content
Modified
Azure-Samples~azure-search-openai-demo
bfb3ee5e8a3708caadb52afb41c89eb31c1511e6
Render entire PDFs instead of single pages (#840)
<10>:<add> "sourcepage": BlobManager.sourcepage_from_file_page( <del> "sourcepage": BlobManager.blob_name_from_file_page(
# module: scripts.prepdocslib.searchmanager class SearchManager: def update_content(self, sections: List[Section]): <0> MAX_BATCH_SIZE = 1000 <1> section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)] <2> <3> async with self.search_info.create_search_client() as search_client: <4> for batch in section_batches: <5> documents = [ <6> { <7> "id": f"{section.content.filename_to_id()}-page-{i}", <8> "content": section.split_page.text, <9> "category": section.category, <10> "sourcepage": BlobManager.blob_name_from_file_page( <11> filename=section.content.filename(), page=section.split_page.page_num <12> ), <13> "sourcefile": section.content.filename(), <14> **section.content.acls, <15> } <16> for i, section in enumerate(batch) <17> ] <18> if self.embeddings: <19> embeddings = await self.embeddings.create_embeddings( <20> texts=[section.split_page.text for section in batch] <21> ) <22> for i, document in enumerate(documents): <23> document["embedding"] = embeddings[i] <24> <25> await search_client.upload_documents(documents) <26>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: scripts.prepdocslib.blobmanager.BlobManager sourcepage_from_file_page(filename, page=0) -> str at: scripts.prepdocslib.embeddings.OpenAIEmbeddings SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} create_embeddings(texts: List[str]) -> List[List[float]] at: scripts.prepdocslib.listfilestrategy.File filename() filename_to_id() at: scripts.prepdocslib.listfilestrategy.File.__init__ self.acls = acls or {} at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.embeddings = embeddings at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page self.content = content self.category = category at: scripts.prepdocslib.strategy.SearchInfo create_search_client() -> SearchClient at: scripts.prepdocslib.textsplitter.SplitPage.__init__ self.page_num = page_num self.text = text at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def sourcepage_from_file_page(cls, filename, page=0) -> str: + if os.path.splitext(filename)[1].lower() == ".pdf": + return f"{os.path.basename(filename)}#page={page+1}" + else: + return os.path.basename(filename) + ===========changed ref 1=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 2=========== # module: scripts.prepdocslib.blobmanager class BlobManager: - @classmethod - def blob_name_from_file_page(cls, filename, page=0) -> str: - if os.path.splitext(filename)[1].lower() == ".pdf": - return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" - else: - return os.path.basename(filename) - ===========changed ref 3=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def remove_blob(self, path: Optional[str] = None): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): return if path is None: prefix = None blobs = container_client.list_blob_names() else: prefix = os.path.splitext(os.path.basename(path))[0] blobs = container_client.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]) + async for blob_path in blobs: - async for b in blobs: + # This still supports PDFs split into individual pages, but we could remove in future to simplify code + if (prefix is not None and not re.match(rf"{prefix}-\d+\.pdf", blob_path)) or ( - if prefix is not None and not re.match(f"{prefix}-\d+\.pdf", b): + path is not None and blob_path == os.path.basename(path) + ): continue if self.verbose: + print(f"\tRemoving blob {blob_path}") - print(f"\tRemoving blob {b}") + await container_client.delete_blob(blob_path) - await container_client.delete_blob(b) ===========changed ref 4=========== # module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File): async with BlobServiceClient( account_url=self.endpoint, credential=self.credential ) as service_client, 
service_client.get_container_client(self.container) as container_client: if not await container_client.exists(): await container_client.create_container() - # if file is PDF split into pages and upload each page as a separate blob - if os.path.splitext(file.content.name)[1].lower() == ".pdf": + # Re-open and upload the original file + with open(file.content.name, "rb") as reopened_file: - with open(file.content.name, "rb") as reopened_file: - reader = PdfReader(reopened_file) - pages = reader.pages - for i in range(len(pages)): - blob_name = BlobManager.blob_name_from_file_page(file.content.name, i) - if self.verbose: - print(f"\tUploading blob for page {i} -> {blob_name}") - f = io.BytesIO() - writer = PdfWriter() - writer.add_page(pages[i]) - writer.write(f) - f.seek(0) - await container_client.upload_blob(blob_name, f, overwrite=True) - else: + blob_name = BlobManager.blob_name_from_file_name(file.content.name) - blob_name = BlobManager.blob_name_from_file_page(file.content.name, page=0) + print(f"\tUploading blob for whole file -> {blob_name}") + await container_client.upload_blob(blob_name, reopened_file, overwrite=True) - await container_client.upload_blob(blob_name, file.content, overwrite=True)
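Note: update_content slices the section list into fixed-size batches before uploading. The slicing expression in isolation:

sections = list(range(2500))
MAX_BATCH_SIZE = 1000
batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)]
print([len(batch) for batch in batches])  # [1000, 1000, 500]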
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
bfb3ee5e8a3708caadb52afb41c89eb31c1511e6
Render entire PDFs instead of single pages (#840)
<0>:<add> # Remove the page fragment from the path, filename.txt#page=1 -> filename.txt <add> if path.find("#page=") > 0: <add> path_parts = path.rsplit("#page=", 1) <add> path = path_parts[0] <add> logging.info("Opening file %s", path) <1>:<add> try: <add> blob = await blob_container_client.get_blob_client(path).download_blob() <del> blob = await blob_container_client.get_blob_client(path).download_blob() <2>:<add> except ResourceNotFoundError: <add> logging.exception("Path not found: %s", path) <add> abort(404)
# module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @bp.route("/content/<path>") + async def content_file(path: str): - async def content_file(path): <0> blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <1> blob = await blob_container_client.get_blob_client(path).download_blob() <2> if not blob.properties or not blob.properties.has_key("content_settings"): <3> abort(404) <4> mime_type = blob.properties["content_settings"]["content_type"] <5> if mime_type == "application/octet-stream": <6> mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" <7> blob_file = io.BytesIO() <8> await blob.readinto(blob_file) <9> blob_file.seek(0) <10> return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) <11>
===========unchanged ref 0=========== at: app.backend.app CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== + # module: tests.test_content_file + + ===========changed ref 1=========== + # module: tests.test_blob_manager + + ===========changed ref 2=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def blob_name_from_file_name(cls, filename) -> str: + return os.path.basename(filename) + ===========changed ref 3=========== + # module: tests.test_content_file + class MockAzureCredential: + def get_token(self, uri): + return MockToken("mock_token", 9999999999) + ===========changed ref 4=========== + # module: tests.test_content_file + MockToken = namedtuple("MockToken", ["token", "expires_on"]) + ===========changed ref 5=========== # module: scripts.prepdocslib.blobmanager class BlobManager: + @classmethod + def sourcepage_from_file_page(cls, filename, page=0) -> str: + if os.path.splitext(filename)[1].lower() == ".pdf": + return f"{os.path.basename(filename)}#page={page+1}" + else: + return os.path.basename(filename) + ===========changed ref 6=========== + # module: tests.test_blob_manager + def test_blob_name_from_file_name(): + assert BlobManager.blob_name_from_file_name("tmp/test.pdf") == "test.pdf" + assert BlobManager.blob_name_from_file_name("tmp/test.html") == "test.html" + ===========changed ref 7=========== + # module: tests.test_blob_manager + def test_sourcepage_from_file_page(): + assert BlobManager.sourcepage_from_file_page("test.pdf", 0) == "test.pdf#page=1" + assert BlobManager.sourcepage_from_file_page("test.html", 0) == "test.html" + ===========changed ref 8=========== + # module: tests.test_blob_manager + @pytest.fixture + def blob_manager(monkeypatch): + return BlobManager( + endpoint=f"https://{os.environ['AZURE_STORAGE_ACCOUNT']}.blob.core.windows.net", + credential=MockAzureCredential(), + container=os.environ["AZURE_STORAGE_CONTAINER"], + verbose=True, + ) + ===========changed ref 9=========== # module: scripts.prepdocslib.blobmanager class BlobManager: - @classmethod - def blob_name_from_file_page(cls, filename, page=0) -> str: - if os.path.splitext(filename)[1].lower() == ".pdf": - return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" - else: - return os.path.basename(filename) - ===========changed ref 10=========== + # module: tests.test_blob_manager + @pytest.mark.asyncio + @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") + async def test_dont_remove_if_no_container(monkeypatch, mock_env, blob_manager): + async def mock_exists(*args, **kwargs): + return False + + monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) + + async def mock_delete_blob(*args, **kwargs): + assert False, "delete_blob() shouldn't have been called" + + monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.delete_blob", mock_delete_blob) + + await blob_manager.remove_blob() + ===========changed ref 11=========== + # module: tests.test_blob_manager + @pytest.mark.asyncio + @pytest.mark.skipif(sys.version_info.minor < 10, reason="requires Python 3.10 or higher") + async def 
test_create_container_upon_upload(monkeypatch, mock_env, blob_manager): + with NamedTemporaryFile(suffix=".pdf") as temp_file: + f = File(temp_file.file) + filename = f.content.name.split("/tmp/")[1] + + # Set up mocks used by upload_blob + async def mock_exists(*args, **kwargs): + return False + + monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.exists", mock_exists) + + async def mock_create_container(*args, **kwargs): + return + + monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.create_container", mock_create_container) + + async def mock_upload_blob(self, name, *args, **kwargs): + assert name == filename + return True + + monkeypatch.setattr("azure.storage.blob.aio.ContainerClient.upload_blob", mock_upload_blob) + + await blob_manager.upload_blob(f) + ===========changed ref 12=========== # module: scripts.prepdocslib.searchmanager class SearchManager: def update_content(self, sections: List[Section]): MAX_BATCH_SIZE = 1000 section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)] async with self.search_info.create_search_client() as search_client: for batch in section_batches: documents = [ { "id": f"{section.content.filename_to_id()}-page-{i}", "content": section.split_page.text, "category": section.category, + "sourcepage": BlobManager.sourcepage_from_file_page( - "sourcepage": BlobManager.blob_name_from_file_page( filename=section.content.filename(), page=section.split_page.page_num ), "sourcefile": section.content.filename(), **section.content.acls, } for i, section in enumerate(batch) ] if self.embeddings: embeddings = await self.embeddings.create_embeddings( texts=[section.split_page.text for section in batch] ) for i, document in enumerate(documents): document["embedding"] = embeddings[i] await search_client.upload_documents(documents)
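Note: because citations now reference whole files plus a "#page=" fragment, content_file strips the fragment before asking blob storage for the file. The same parsing in isolation (path value illustrative):

path = "employee_handbook.pdf#page=3"
if path.find("#page=") > 0:
    path_parts = path.rsplit("#page=", 1)
    path = path_parts[0]  # "employee_handbook.pdf" is what blob storage sees
    page = path_parts[1]  # "3", useful only for logging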
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
4479a2c5c1e6d54bdfe719077092a5e4bcd5d229
Handle errors better especially for streaming (#884)
<15>:<add> except Exception as error: <del> except Exception as e: <16>:<add> logging.exception("Exception in /ask: %s", error) <del> logging.exception("Exception in /ask") <17>:<add> return jsonify(error_dict(error)), 500 <del> return jsonify({"error": str(e)}), 500
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_ASK_APPROACH] <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await approach.run( <12> request_json["messages"], context=context, session_state=request_json.get("session_state") <13> ) <14> return jsonify(r) <15> except Exception as e: <16> logging.exception("Exception in /ask") <17> return jsonify({"error": str(e)}), 500 <18>
===========unchanged ref 0=========== at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ bp = Blueprint("routes", __name__, static_folder="static") at: app.backend.app.content_file path = path_parts[0] mime_type = blob.properties["content_settings"]["content_type"] mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() ===========unchanged ref 1=========== at: io.BytesIO seek(self, offset: int, whence: int=..., /) -> int ===========changed ref 0=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + ERROR_MESSAGE = """The app encountered an error processing your request. + If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. + Error type: {error_type} + """ bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 1=========== # module: tests.test_app + def test_error_dict(caplog): + error = app.error_dict(Exception("test")) + assert error == { + "error": "The app encountered an error processing your request.\nIf you are an administrator of the app, view the full error in the logs. 
See aka.ms/appservice-logs for more information.\nError type: <class 'Exception'>\n" + } + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: something bad happened" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_format_as_ndjson_error(caplog): + async def gen(): + if False: + yield {"a": "I ❤️ 🐍"} + raise ZeroDivisionError("something bad happened") + + result = [line async for line in app.format_as_ndjson(gen())] + assert "Exception while generating response stream: something bad happened\n" in caplog.text + assert result == [ + '{"error": "The app encountered an error processing your request.\\nIf you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.\\nError type: <class \'ZeroDivisionError\'>\\n"}' + ] + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /ask: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /chat: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
app.backend.app/format_as_ndjson
Modified
Azure-Samples~azure-search-openai-demo
4479a2c5c1e6d54bdfe719077092a5e4bcd5d229
Handle errors better especially for streaming (#884)
<0>:<add> try: <add> async for event in r: <del> async for event in r: <1>:<add> yield json.dumps(event, ensure_ascii=False) + "\n" <del> yield json.dumps(event, ensure_ascii=False) + "\n" <2>:<add> except Exception as e: <add> logging.exception("Exception while generating response stream: %s", e) <add> yield json.dumps(error_dict(e))
# module: app.backend.app def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]: <0> async for event in r: <1> yield json.dumps(event, ensure_ascii=False) + "\n" <2>
===========unchanged ref 0=========== at: app.backend.app.ask request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) r = await approach.run( request_json["messages"], context=context, session_state=request_json.get("session_state") ) ===========changed ref 0=========== # module: app.backend.app + def error_dict(error: Exception) -> dict: + return {"error": ERROR_MESSAGE.format(error_type=type(error))} + ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + ERROR_MESSAGE = """The app encountered an error processing your request. + If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. + Error type: {error_type} + """ bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 2=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: approach = current_app.config[CONFIG_ASK_APPROACH] # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await approach.run( request_json["messages"], context=context, session_state=request_json.get("session_state") ) return jsonify(r) + except Exception as error: - except Exception as e: + logging.exception("Exception in /ask: %s", error) - logging.exception("Exception in /ask") + return jsonify(error_dict(error)), 500 - return jsonify({"error": str(e)}), 500 ===========changed ref 3=========== # module: tests.test_app + def test_error_dict(caplog): + error = app.error_dict(Exception("test")) + assert error == { + "error": "The app encountered an error processing your request.\nIf you are an administrator of the app, view the full error in the logs. 
See aka.ms/appservice-logs for more information.\nError type: <class 'Exception'>\n" + } + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: something bad happened" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_format_as_ndjson_error(caplog): + async def gen(): + if False: + yield {"a": "I ❤️ 🐍"} + raise ZeroDivisionError("something bad happened") + + result = [line async for line in app.format_as_ndjson(gen())] + assert "Exception while generating response stream: something bad happened\n" in caplog.text + assert result == [ + '{"error": "The app encountered an error processing your request.\\nIf you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.\\nError type: <class \'ZeroDivisionError\'>\\n"}' + ] + ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /ask: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /chat: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
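Note: format_as_ndjson yields one JSON object per line and, when the wrapped generator raises, a final error object instead of aborting the stream. A sketch of how a client can decode such a body (the body string is illustrative):

import json

body = '{"delta": "Hello"}\n{"error": "The app encountered an error processing your request."}'
events = [json.loads(line) for line in body.splitlines() if line.strip()]
# events[0] is a normal streamed chunk; events[-1] carries the error payload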
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
4479a2c5c1e6d54bdfe719077092a5e4bcd5d229
Handle errors better especially for streaming (#884)
<20>:<add> except Exception as error: <del> except Exception as e: <21>:<add> logging.exception("Exception in /chat: %s", error) <del> logging.exception("Exception in /chat") <22>:<add> return jsonify(error_dict(error)), 500 <del> return jsonify({"error": str(e)}), 500
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_CHAT_APPROACH] <8> result = await approach.run( <9> request_json["messages"], <10> stream=request_json.get("stream", False), <11> context=context, <12> session_state=request_json.get("session_state"), <13> ) <14> if isinstance(result, dict): <15> return jsonify(result) <16> else: <17> response = await make_response(format_as_ndjson(result)) <18> response.timeout = None # type: ignore <19> return response <20> except Exception as e: <21> logging.exception("Exception in /chat") <22> return jsonify({"error": str(e)}), 500 <23>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") error_dict(error: Exception) -> dict at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.app + def error_dict(error: Exception) -> dict: + return {"error": ERROR_MESSAGE.format(error_type=type(error))} + ===========changed ref 1=========== # module: app.backend.app def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]: + try: + async for event in r: - async for event in r: + yield json.dumps(event, ensure_ascii=False) + "\n" - yield json.dumps(event, ensure_ascii=False) + "\n" + except Exception as e: + logging.exception("Exception while generating response stream: %s", e) + yield json.dumps(error_dict(e)) ===========changed ref 2=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" + ERROR_MESSAGE = """The app encountered an error processing your request. + If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. + Error type: {error_type} + """ bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 3=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: approach = current_app.config[CONFIG_ASK_APPROACH] # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await approach.run( request_json["messages"], context=context, session_state=request_json.get("session_state") ) return jsonify(r) + except Exception as error: - except Exception as e: + logging.exception("Exception in /ask: %s", error) - logging.exception("Exception in /ask") + return jsonify(error_dict(error)), 500 - return jsonify({"error": str(e)}), 500 ===========changed ref 4=========== # module: tests.test_app + def test_error_dict(caplog): + error = app.error_dict(Exception("test")) + assert error == { + "error": "The app encountered an error processing your request.\nIf you are an administrator of the app, view the full error in the logs. 
See aka.ms/appservice-logs for more information.\nError type: <class 'Exception'>\n" + } + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", mock.Mock(side_effect=ZeroDivisionError("something bad happened")) + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: something bad happened" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_format_as_ndjson_error(caplog): + async def gen(): + if False: + yield {"a": "I ❤️ 🐍"} + raise ZeroDivisionError("something bad happened") + + result = [line async for line in app.format_as_ndjson(gen())] + assert "Exception while generating response stream: something bad happened\n" in caplog.text + assert result == [ + '{"error": "The app encountered an error processing your request.\\nIf you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information.\\nError type: <class \'ZeroDivisionError\'>\\n"}' + ] + ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /ask: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock(side_effect=ZeroDivisionError("something bad happened")), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "What is the capital of France?", "role": "user"}]}, + ) + assert response.status_code == 500 + result = await response.get_json() + assert "Exception in /chat: something bad happened" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
scripts.prepdocslib.listfilestrategy/LocalListFileStrategy.check_md5
Modified
Azure-Samples~azure-search-openai-demo
3bf2d198185287bf6aae0d7190845901e1774e8e
Fix list file (#897)
<16>:<add> return True <del> return True
# module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def check_md5(self, path: str) -> bool: <0> # if filename ends in .md5 skip <1> if path.endswith(".md5"): <2> return True <3> <4> # if there is a file called .md5 in this directory, see if its updated <5> stored_hash = None <6> with open(path, "rb") as file: <7> existing_hash = hashlib.md5(file.read()).hexdigest() <8> hash_path = f"{path}.md5" <9> if os.path.exists(hash_path): <10> with open(hash_path, encoding="utf-8") as md5_f: <11> stored_hash = md5_f.read() <12> <13> if stored_hash and stored_hash.strip() == existing_hash.strip(): <14> if self.verbose: <15> print(f"Skipping {path}, no changes detected.") <16> return True <17> <18> # Write the hash <19> with open(hash_path, "w", encoding="utf-8") as md5_f: <20> md5_f.write(existing_hash) <21> <22> return False <23>
===========unchanged ref 0=========== at: hashlib md5(string: ReadableBuffer=...) -> _Hash at: hashlib._Hash digest_size: int block_size: int name: str hexdigest() -> str at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: io.TextIOWrapper read(self, size: Optional[int]=..., /) -> str write(self, s: str, /) -> int at: os.path exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy.__init__ self.verbose = verbose at: typing.BinaryIO __slots__ = () write(s: AnyStr) -> int at: typing.IO __slots__ = () read(n: int=...) -> AnyStr write(s: AnyStr) -> int
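Note: check_md5 maintains a "<path>.md5" sidecar so unchanged files are skipped on re-ingestion. A sketch of the digest computation and sidecar write the method performs:

import hashlib

def file_md5(path: str) -> str:
    # Digest of the file contents, matching what check_md5 compares against the sidecar
    with open(path, "rb") as file:
        return hashlib.md5(file.read()).hexdigest()

def write_sidecar(path: str) -> None:
    # Persist the digest next to the file as "<path>.md5"
    with open(f"{path}.md5", "w", encoding="utf-8") as md5_file:
        md5_file.write(file_md5(path))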
scripts.prepdocslib.listfilestrategy/LocalListFileStrategy._list_paths
Modified
Azure-Samples~azure-search-openai-demo
18eee6bc1251c6eef1366a66df310865b027fc34
Test and fix file strategy list (#907)
<4>:<add> else: <add> # Only list files, not directories <add> yield path <5>:<del> yield path <6>:<del>
# module: scripts.prepdocslib.listfilestrategy class LocalListFileStrategy(ListFileStrategy): def _list_paths(self, path_pattern: str) -> AsyncGenerator[str, None]: <0> for path in glob(path_pattern): <1> if os.path.isdir(path): <2> async for p in self._list_paths(f"{path}/*"): <3> yield p <4> <5> yield path <6>
===========unchanged ref 0=========== at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.ListFileStrategy list(self) -> AsyncGenerator[File, None] at: scripts.prepdocslib.listfilestrategy.LocalListFileStrategy list_paths() -> AsyncGenerator[str, None] check_md5(path: str) -> bool at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: scripts.prepdocslib.listfilestrategy class File: - def __exit__(self, *args): - self.close() - ===========changed ref 1=========== # module: scripts.prepdocslib.listfilestrategy class File: - def __enter__(self): - return self -
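Note: the fix moves the yield into an else branch, so directories are recursed into but never yielded themselves. A synchronous equivalent of the corrected generator:

import os
from glob import glob

def list_paths(path_pattern: str):
    for path in glob(path_pattern):
        if os.path.isdir(path):
            # Recurse into subdirectories without yielding the directory path
            yield from list_paths(f"{path}/*")
        else:
            yield path  # only file paths reach the caller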
scripts.prepdocslib.blobmanager/BlobManager.upload_blob
Modified
Azure-Samples~azure-search-openai-demo
e128bd97f8a3e1dc2a9f3404b6b6e19ea6d86f86
Enable uploads of large PDFs over connections with less upload speed (#923)
<1>:<add> account_url=self.endpoint, credential=self.credential, max_single_put_size=4 * 1024 * 1024 <del> account_url=self.endpoint, credential=self.credential
# module: scripts.prepdocslib.blobmanager class BlobManager: def upload_blob(self, file: File): <0> async with BlobServiceClient( <1> account_url=self.endpoint, credential=self.credential <2> ) as service_client, service_client.get_container_client(self.container) as container_client: <3> if not await container_client.exists(): <4> await container_client.create_container() <5> <6> # Re-open and upload the original file <7> with open(file.content.name, "rb") as reopened_file: <8> blob_name = BlobManager.blob_name_from_file_name(file.content.name) <9> print(f"\tUploading blob for whole file -> {blob_name}") <10> await container_client.upload_blob(blob_name, reopened_file, overwrite=True) <11>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: scripts.prepdocslib.blobmanager.BlobManager blob_name_from_file_name(filename) -> str at: scripts.prepdocslib.blobmanager.BlobManager.__init__ self.endpoint = endpoint self.credential = credential self.container = container at: scripts.prepdocslib.listfilestrategy File(content: IO, acls: Optional[dict[str, list]]=None) at: scripts.prepdocslib.listfilestrategy.File.__init__ self.content = content at: typing.IO __slots__ = ()
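===========editor sketch===========
The new `max_single_put_size` lowers the threshold at which the storage SDK switches from one single-shot PUT to staged block uploads; blocks are retried individually, which is what lets large PDFs survive slow or flaky uplinks. A sketch of the client configuration (account URL and container name are placeholders):

from azure.identity.aio import DefaultAzureCredential
from azure.storage.blob.aio import BlobServiceClient


async def upload_file(local_path: str) -> None:
    credential = DefaultAzureCredential()
    async with BlobServiceClient(
        account_url="https://<account>.blob.core.windows.net",
        credential=credential,
        # Files above 4 MiB are uploaded as chunked blocks instead of one PUT.
        max_single_put_size=4 * 1024 * 1024,
    ) as service_client:
        container_client = service_client.get_container_client("content")
        with open(local_path, "rb") as data:
            await container_client.upload_blob(local_path, data, overwrite=True)
    await credential.close()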
app.backend.app/error_dict
Modified
Azure-Samples~azure-search-openai-demo
204bb1cf4dbbe1eb2aa99a4dcc08e1df96fa5a86
Add special error (#896)
<0>:<add> if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": <add> return {"error": ERROR_MESSAGE_FILTER}
# module: app.backend.app def error_dict(error: Exception) -> dict: <0> return {"error": ERROR_MESSAGE.format(error_type=type(error))} <1>
===========changed ref 0=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ + ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: The response was filtered" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /ask: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /chat: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
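===========editor sketch===========
The whole special case hinges on one attribute: Azure OpenAI reports content-filter hits as an `InvalidRequestError` whose `code` is "content_filter". The same check restated as a predicate (helper name is illustrative):

import openai


def is_content_filter_error(error: Exception) -> bool:
    # The condition error_dict now special-cases, so filtered prompts get a
    # friendly message instead of the generic error text.
    return isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter"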
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
204bb1cf4dbbe1eb2aa99a4dcc08e1df96fa5a86
Add special error (#896)
<16>:<del> logging.exception("Exception in /ask: %s", error) <17>:<del> return jsonify(error_dict(error)), 500 <18>:<add> return error_response(error, "/ask")
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_ASK_APPROACH] <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await approach.run( <12> request_json["messages"], context=context, session_state=request_json.get("session_state") <13> ) <14> return jsonify(r) <15> except Exception as error: <16> logging.exception("Exception in /ask: %s", error) <17> return jsonify(error_dict(error)), 500 <18>
===========unchanged ref 0=========== at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ bp = Blueprint("routes", __name__, static_folder="static") error_dict(error: Exception) -> dict at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai.error InvalidRequestError(message, param, code=None, http_body=None, http_status=None, json_body=None, headers=None) ===========changed ref 0=========== # module: app.backend.app def error_dict(error: Exception) -> dict: + if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": + return {"error": ERROR_MESSAGE_FILTER} return {"error": ERROR_MESSAGE.format(error_type=type(error))} ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. Error type: {error_type} """ + ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: The response was filtered" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /ask: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, 
caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /chat: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
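===========editor sketch===========
The behavior change is easiest to read off the status codes: `error_response` (introduced by this commit; its body appears in the next record's context) keeps 500 as the default but downgrades content-filter errors to 400. A quick self-check mirroring the tests' constructor call:

import openai

filtered = openai.error.InvalidRequestError(
    "The response was filtered", "prompt", code="content_filter"
)
assert filtered.code == "content_filter"
# error_response(filtered, "/ask")            -> JSON error body with status 400
# error_response(ZeroDivisionError(), "/ask") -> JSON error body with status 500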
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
204bb1cf4dbbe1eb2aa99a4dcc08e1df96fa5a86
Add special error (#896)
<21>:<del> logging.exception("Exception in /chat: %s", error) <22>:<del> return jsonify(error_dict(error)), 500 <23>:<add> return error_response(error, "/chat")
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_CHAT_APPROACH] <8> result = await approach.run( <9> request_json["messages"], <10> stream=request_json.get("stream", False), <11> context=context, <12> session_state=request_json.get("session_state"), <13> ) <14> if isinstance(result, dict): <15> return jsonify(result) <16> else: <17> response = await make_response(format_as_ndjson(result)) <18> response.timeout = None # type: ignore <19> return response <20> except Exception as error: <21> logging.exception("Exception in /chat: %s", error) <22> return jsonify(error_dict(error)), 500 <23>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") error_dict(error: Exception) -> dict at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.app def error_dict(error: Exception) -> dict: + if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": + return {"error": ERROR_MESSAGE_FILTER} return {"error": ERROR_MESSAGE.format(error_type=type(error))} ===========changed ref 1=========== # module: app.backend.app + def error_response(error: Exception, route: str, status_code: int = 500): + logging.exception("Exception in %s: %s", route, error) + if isinstance(error, openai.error.InvalidRequestError) and error.code == "content_filter": + status_code = 400 + return jsonify(error_dict(error)), status_code + ===========changed ref 2=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) auth_helper = current_app.config[CONFIG_AUTH_CLIENT] context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) try: approach = current_app.config[CONFIG_ASK_APPROACH] # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await approach.run( request_json["messages"], context=context, session_state=request_json.get("session_state") ) return jsonify(r) except Exception as error: - logging.exception("Exception in /ask: %s", error) - return jsonify(error_dict(error)), 500 + return error_response(error, "/ask") ===========changed ref 3=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" ERROR_MESSAGE = """The app encountered an error processing your request. If you are an administrator of the app, view the full error in the logs. See aka.ms/appservice-logs for more information. 
Error type: {error_type} """ + ERROR_MESSAGE_FILTER = """Your message contains content that was flagged by the OpenAI content filter.""" bp = Blueprint("routes", __name__, static_folder="static") # Fix Windows registry issue with mimetypes mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety_streaming(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "openai.ChatCompletion.acreate", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}], "stream": True}, + ) + assert response.status_code == 200 + assert "Exception while generating response stream: The response was filtered" in caplog.text + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.retrievethenread.RetrieveThenReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/ask", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /ask: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_handle_exception_contentsafety(client, monkeypatch, snapshot, caplog): + monkeypatch.setattr( + "approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run", + mock.Mock( + side_effect=openai.error.InvalidRequestError("The response was filtered", "prompt", code="content_filter") + ), + ) + + response = await client.post( + "/chat", + json={"messages": [{"content": "How do I do something bad?", "role": "user"}]}, + ) + assert response.status_code == 400 + result = await response.get_json() + assert "Exception in /chat: The response was filtered" in caplog.text + snapshot.assert_match(json.dumps(result, indent=4), "result.json") +
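===========editor sketch===========
Note the asymmetry in the new tests: non-streaming requests get a 400/500, but the streaming test expects 200 with the error inside the body. Once streaming starts, the status line is already on the wire, so a failure can only be appended as a final JSON line. A simplified standalone sketch of that behavior (the real app uses its error_dict here):

import json
from typing import AsyncGenerator


async def format_stream(gen: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    try:
        async for event in gen:
            yield json.dumps(event, ensure_ascii=False) + "\n"
    except Exception as error:
        # Headers (and the 200 status) were already sent; report in-band.
        yield json.dumps({"error": f"Stream failed: {type(error)}"}) + "\n"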
scripts.prepdocslib.pdfparser/DocumentAnalysisPdfParser.parse
Modified
Azure-Samples~azure-search-openai-demo
1f7de662e3f452cfaa4a7b1982357313a8345f61
replace form recognizer with doc intelligence (#686)
<1>:<add> print(f"Extracting text from '{content.name}' using Azure Document Intelligence") <del> print(f"Extracting text from '{content.name}' using Azure Form Recognizer")
# module: scripts.prepdocslib.pdfparser class DocumentAnalysisPdfParser(PdfParser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: <0> if self.verbose: <1> print(f"Extracting text from '{content.name}' using Azure Form Recognizer") <2> <3> async with DocumentAnalysisClient( <4> endpoint=self.endpoint, credential=self.credential, headers={"x-ms-useragent": USER_AGENT} <5> ) as form_recognizer_client: <6> poller = await form_recognizer_client.begin_analyze_document(model_id=self.model_id, document=content) <7> form_recognizer_results = await poller.result() <8> <9> offset = 0 <10> for page_num, page in enumerate(form_recognizer_results.pages): <11> tables_on_page = [ <12> table <13> for table in (form_recognizer_results.tables or []) <14> if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 <15> ] <16> <17> # mark all positions of the table spans in the page <18> page_offset = page.spans[0].offset <19> page_length = page.spans[0].length <20> table_chars = [-1] * page_length <21> for table_id, table in enumerate(tables_on_page): <22> for span in table.spans: <23> # replace all table spans with "table_id" in table_chars array <24> for i in range(span.length): <25> idx = span.offset - page_offset + i <26> if idx >= 0 and idx < page_length: <27> table_chars[idx] = table_id <28> <29> # build page text by replacing characters in table spans with table html <30> page_text = "" <31> added_tables = set() <32> for idx, table_id in enumerate(table_chars): <33> if table_id == -1: <34> page_text += form_recognizer_results.content[page_offset +</s>
===========below chunk 0=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisPdfParser(PdfParser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: # offset: 1 elif table_id not in added_tables: page_text += DocumentAnalysisPdfParser.table_to_html(tables_on_page[table_id]) added_tables.add(table_id) yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text) ===========unchanged ref 0=========== at: scripts.prepdocslib.pdfparser Page(page_num: int, offset: int, text: str) DocumentAnalysisPdfParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout", verbose: bool=False) at: scripts.prepdocslib.pdfparser.DocumentAnalysisPdfParser table_to_html(table: DocumentTable) at: scripts.prepdocslib.pdfparser.DocumentAnalysisPdfParser.__init__ self.model_id = model_id self.endpoint = endpoint self.credential = credential self.verbose = verbose at: scripts.prepdocslib.pdfparser.PdfParser parse(self, content: IO) -> AsyncGenerator[Page, None] at: scripts.prepdocslib.strategy USER_AGENT = "azure-search-chat-demo/1.0.0" at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) IO() at: typing.IO __slots__ = ()
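===========editor sketch===========
The parse loop above is a single-pass substitution: every character position covered by a table span is marked with that table's id, then the page is rebuilt with each table emitted once, as HTML, at its first position. The same idea on plain data (all inputs are hypothetical stand-ins for the Document Intelligence result objects):

def build_page_text(content: str, page_offset: int, page_length: int,
                    table_spans: dict[int, list[tuple[int, int]]],
                    table_html: dict[int, str]) -> str:
    # table_spans: table_id -> [(offset, length), ...] in document coordinates
    # table_html:  table_id -> rendered <table> markup
    table_chars = [-1] * page_length
    for table_id, spans in table_spans.items():
        for offset, length in spans:
            for i in range(length):
                idx = offset - page_offset + i
                if 0 <= idx < page_length:
                    table_chars[idx] = table_id
    page_text, added_tables = "", set()
    for idx, table_id in enumerate(table_chars):
        if table_id == -1:
            page_text += content[page_offset + idx]
        elif table_id not in added_tables:
            page_text += table_html[table_id]  # emit each table exactly once
            added_tables.add(table_id)
    return page_text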
scripts.prepdocs/setup_file_strategy
Modified
Azure-Samples~azure-search-openai-demo
1f7de662e3f452cfaa4a7b1982357313a8345f61
replace form recognizer with doc intelligence (#686)
<12>:<add> # check if Azure Document Intelligence credentials are provided <del> # check if Azure Form Recognizer credentials are provided <15>:<add> "Error: Azure Document Intelligence service is not provided. Please provide --formrecognizerservice or use --localpdfparser for local pypdf parser." <del> "Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser."
# module: scripts.prepdocs def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy: <0> storage_creds = credential if is_key_empty(args.storagekey) else args.storagekey <1> blob_manager = BlobManager( <2> endpoint=f"https://{args.storageaccount}.blob.core.windows.net", <3> container=args.container, <4> credential=storage_creds, <5> verbose=args.verbose, <6> ) <7> <8> pdf_parser: PdfParser <9> if args.localpdfparser: <10> pdf_parser = LocalPdfParser() <11> else: <12> # check if Azure Form Recognizer credentials are provided <13> if args.formrecognizerservice is None: <14> print( <15> "Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser." <16> ) <17> exit(1) <18> formrecognizer_creds: Union[AsyncTokenCredential, AzureKeyCredential] = ( <19> credential if is_key_empty(args.formrecognizerkey) else AzureKeyCredential(args.formrecognizerkey) <20> ) <21> pdf_parser = DocumentAnalysisPdfParser( <22> endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", <23> credential=formrecognizer_creds, <24> verbose=args.verbose, <25> ) <26> <27> use_vectors = not args.novectors <28> embeddings: Optional[OpenAIEmbeddings] = None <29> if use_vectors and args.openaihost != "openai": <30> azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = ( <31> credential if is_key_empty(args.openaikey) else AzureKeyCredential(args.openaikey) <32> ) <33> embeddings = AzureOpenAIEmbeddingService( <34> open_ai_service=args.openaiservice, <35> open_ai_deployment=args.openaideployment, <36> </s>
===========below chunk 0=========== # module: scripts.prepdocs def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy: # offset: 1 credential=azure_open_ai_credential, disable_batch=args.disablebatchvectors, verbose=args.verbose, ) elif use_vectors: embeddings = OpenAIEmbeddingService( open_ai_model_name=args.openaimodelname, credential=args.openaikey, organization=args.openaiorg, disable_batch=args.disablebatchvectors, verbose=args.verbose, ) print("Processing files...") list_file_strategy: ListFileStrategy if args.datalakestorageaccount: adls_gen2_creds = credential if is_key_empty(args.datalakekey) else args.datalakekey print(f"Using Data Lake Gen2 Storage Account {args.datalakestorageaccount}") list_file_strategy = ADLSGen2ListFileStrategy( data_lake_storage_account=args.datalakestorageaccount, data_lake_filesystem=args.datalakefilesystem, data_lake_path=args.datalakepath, credential=adls_gen2_creds, verbose=args.verbose, ) else: print(f"Using local files in {args.files}") list_file_strategy = LocalListFileStrategy(path_pattern=args.files, verbose=args.verbose) if args.removeall: document_action = DocumentAction.RemoveAll elif args.remove: document_action = DocumentAction.Remove else: document_action = DocumentAction.Add return FileStrategy( list_file_strategy=list_file_strategy, blob_manager=blob_manager, pdf_parser=pdf_parser, text_splitter=TextSplitter(), document_action=document_action, embeddings=embeddings, search_analyzer_name=args.searchanalyzername, use</s> ===========below chunk 1=========== # module: scripts.prepdocs def setup_file_strategy(credential: AsyncTokenCredential, args: Any) -> FileStrategy: # offset: 2 <s>=document_action, embeddings=embeddings, search_analyzer_name=args.searchanalyzername, use_acls=args.useacls, category=args.category, ) ===========unchanged ref 0=========== at: prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: prepdocslib.embeddings OpenAIEmbeddings(open_ai_model_name: str, disable_batch: bool=False, verbose: bool=False) AzureOpenAIEmbeddingService(open_ai_service: str, open_ai_deployment: str, open_ai_model_name: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], disable_batch: bool=False, verbose: bool=False) OpenAIEmbeddingService(open_ai_model_name: str, credential: str, organization: Optional[str]=None, disable_batch: bool=False, verbose: bool=False) at: prepdocslib.filestrategy DocumentAction() FileStrategy(list_file_strategy: ListFileStrategy, blob_manager: BlobManager, pdf_parser: PdfParser, text_splitter: TextSplitter, document_action: DocumentAction=DocumentAction.Add, embeddings: Optional[OpenAIEmbeddings]=None, search_analyzer_name: Optional[str]=None, use_acls: bool=False, category: Optional[str]=None) at: prepdocslib.listfilestrategy ListFileStrategy() LocalListFileStrategy(path_pattern: str, verbose: bool=False) ADLSGen2ListFileStrategy(data_lake_storage_account: str, data_lake_filesystem: str, data_lake_path: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: prepdocslib.pdfparser PdfParser() LocalPdfParser() DocumentAnalysisPdfParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout", verbose: bool=False) at: prepdocslib.textsplitter TextSplitter(verbose: bool=False) at: scripts.prepdocs is_key_empty(key) ===========changed ref 0=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisPdfParser(PdfParser): 
def parse(self, content: IO) -> AsyncGenerator[Page, None]: if self.verbose: + print(f"Extracting text from '{content.name}' using Azure Document Intelligence") - print(f"Extracting text from '{content.name}' using Azure Form Recognizer") async with DocumentAnalysisClient( endpoint=self.endpoint, credential=self.credential, headers={"x-ms-useragent": USER_AGENT} ) as form_recognizer_client: poller = await form_recognizer_client.begin_analyze_document(model_id=self.model_id, document=content) form_recognizer_results = await poller.result() offset = 0 for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [ table for table in (form_recognizer_results.tables or []) if table.bounding_regions and table.bounding_regions[0].page_number == page_num + 1 ] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1] * page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >= 0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing characters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] elif table_id not in</s> ===========changed ref 1=========== # module: scripts.prepdocslib.pdfparser class DocumentAnalysisPdfParser(PdfParser): def parse(self, content: IO) -> AsyncGenerator[Page, None]: # offset: 1 <s> page_text += form_recognizer_results.content[page_offset + idx] elif table_id not in added_tables: page_text += DocumentAnalysisPdfParser.table_to_html(tables_on_page[table_id]) added_tables.add(table_id) yield Page(page_num=page_num, offset=offset, text=page_text) offset += len(page_text)
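===========editor sketch===========
Beyond the renamed messages, this record repeats one credential pattern per service: use the key when one is supplied, otherwise fall back to the ambient Azure credential. Distilled (helper name is illustrative; `is_key_empty` in the repo treats None or blank as empty):

from typing import Optional, Union

from azure.core.credentials import AzureKeyCredential
from azure.core.credentials_async import AsyncTokenCredential


def pick_credential(
    default: AsyncTokenCredential, key: Optional[str]
) -> Union[AsyncTokenCredential, AzureKeyCredential]:
    # Mirrors: credential if is_key_empty(key) else AzureKeyCredential(key)
    if key is None or len(key.strip()) == 0:
        return default
    return AzureKeyCredential(key)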
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
59b8cbd1a8bad3008572cf82a5494b653faf6a0b
set content-type for streamed response (#968)
<19>:<add> response.mimetype = "application/json-lines"
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <5> context["auth_claims"] = await auth_helper.get_auth_claims_if_enabled(request.headers) <6> try: <7> approach = current_app.config[CONFIG_CHAT_APPROACH] <8> result = await approach.run( <9> request_json["messages"], <10> stream=request_json.get("stream", False), <11> context=context, <12> session_state=request_json.get("session_state"), <13> ) <14> if isinstance(result, dict): <15> return jsonify(result) <16> else: <17> response = await make_response(format_as_ndjson(result)) <18> response.timeout = None # type: ignore <19> return response <20> except Exception as error: <21> return error_response(error, "/chat") <22>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]
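===========editor sketch===========
With the mimetype set to `application/json-lines`, every line of the streamed body is a standalone JSON document (as produced by format_as_ndjson). A minimal client-side decoder for such a response (hypothetical helper):

import json


def parse_json_lines(raw: bytes) -> list[dict]:
    # One JSON object per newline-delimited line; blank lines skipped defensively.
    return [json.loads(line) for line in raw.decode("utf-8").splitlines() if line.strip()]


assert parse_json_lines(b'{"a": 1}\n{"b": 2}\n') == [{"a": 1}, {"b": 2}]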
scripts.prepdocslib.searchmanager/SearchManager.update_content
Modified
Azure-Samples~azure-search-openai-demo
ff507749160e9c7d6743cbf07595bcf8e129a60f
[Bug fix] Fix prepdocs logic in uploading docs with more than 1000 sections (#971)
<4>:<add> for batch_index, batch in enumerate(section_batches): <del> for batch in section_batches: <7>:<add> "id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}", <del> "id": f"{section.content.filename_to_id()}-page-{i}", <16>:<add> for section_index, section in enumerate(batch) <del> for i, section in enumerate(batch)
# module: scripts.prepdocslib.searchmanager class SearchManager: def update_content(self, sections: List[Section]): <0> MAX_BATCH_SIZE = 1000 <1> section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)] <2> <3> async with self.search_info.create_search_client() as search_client: <4> for batch in section_batches: <5> documents = [ <6> { <7> "id": f"{section.content.filename_to_id()}-page-{i}", <8> "content": section.split_page.text, <9> "category": section.category, <10> "sourcepage": BlobManager.sourcepage_from_file_page( <11> filename=section.content.filename(), page=section.split_page.page_num <12> ), <13> "sourcefile": section.content.filename(), <14> **section.content.acls, <15> } <16> for i, section in enumerate(batch) <17> ] <18> if self.embeddings: <19> embeddings = await self.embeddings.create_embeddings( <20> texts=[section.split_page.text for section in batch] <21> ) <22> for i, document in enumerate(documents): <23> document["embedding"] = embeddings[i] <24> <25> await search_client.upload_documents(documents) <26>
===========unchanged ref 0=========== at: scripts.prepdocslib.blobmanager BlobManager(endpoint: str, container: str, credential: Union[AsyncTokenCredential, str], verbose: bool=False) at: scripts.prepdocslib.blobmanager.BlobManager sourcepage_from_file_page(filename, page=0) -> str at: scripts.prepdocslib.embeddings.OpenAIEmbeddings SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} create_embeddings(texts: List[str]) -> List[List[float]] at: scripts.prepdocslib.listfilestrategy.File filename() filename_to_id() at: scripts.prepdocslib.listfilestrategy.File.__init__ self.acls = acls or {} at: scripts.prepdocslib.searchmanager Section(split_page: SplitPage, content: File, category: Optional[str]=None) at: scripts.prepdocslib.searchmanager.SearchManager.__init__ self.search_info = search_info self.embeddings = embeddings at: scripts.prepdocslib.searchmanager.Section.__init__ self.split_page = split_page self.content = content self.category = category at: scripts.prepdocslib.strategy.SearchInfo create_search_client() -> SearchClient at: scripts.prepdocslib.textsplitter.SplitPage.__init__ self.page_num = page_num self.text = text at: typing List = _alias(list, 1, inst=False, name='List')
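===========editor sketch===========
The bug sits entirely in the ID arithmetic: per-batch enumeration restarted at 0, so for a file with more than 1000 sections every later batch reused page suffixes 0-999 and overwrote earlier index documents. The fixed formula keeps IDs globally unique, demonstrated here on bare integers:

MAX_BATCH_SIZE = 1000


def document_ids(num_sections: int) -> list[int]:
    sections = list(range(num_sections))
    batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, num_sections, MAX_BATCH_SIZE)]
    ids = []
    for batch_index, batch in enumerate(batches):
        for section_index, _ in enumerate(batch):
            # Pre-fix this was just section_index, hence the collisions.
            ids.append(section_index + batch_index * MAX_BATCH_SIZE)
    return ids


assert document_ids(2500) == list(range(2500))  # unique page suffix per section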
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
5de99a33cb6c614b3587ff821d7bfabfa943597a
Added insights dashboard (#898)
<0>:<add> app = Quart(__name__) <add> app.register_blueprint(bp) <add> <2>:<add> # This tracks HTTP requests made by aiohttp: <3>:<del> app = Quart(__name__) <4>:<del> app.register_blueprint(bp) <5>:<add> # This middleware tracks app route requests: <add> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[method-assign] <del> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[method-assign]
# module: app.backend.app def create_app(): <0> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <1> configure_azure_monitor() <2> AioHttpClientInstrumentor().instrument() <3> app = Quart(__name__) <4> app.register_blueprint(bp) <5> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[method-assign] <6> <7> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels <8> default_level = "INFO" # In development, log more verbosely <9> if os.getenv("WEBSITE_HOSTNAME"): # In production, don't log as heavily <10> default_level = "WARNING" <11> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", default_level)) <12> <13> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <14> app.logger.info("CORS enabled for %s", allowed_origin) <15> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"]) <16> return app <17>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: logging basicConfig(*, filename: Optional[StrPath]=..., filemode: str=..., format: str=..., datefmt: Optional[str]=..., style: str=..., level: Optional[_Level]=..., stream: Optional[IO[str]]=..., handlers: Optional[Iterable[Handler]]=...) -> None at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
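===========editor sketch===========
The reordering matters because each instrumentation covers a different hop: configure_azure_monitor wires the exporter, AioHttpClientInstrumentor traces outbound aiohttp calls (e.g. to OpenAI), and the ASGI middleware traces inbound route requests; all three now run only when the connection string is present. Condensed, assuming the same packages the app imports:

import os

from azure.monitor.opentelemetry import configure_azure_monitor
from opentelemetry.instrumentation.aiohttp_client import AioHttpClientInstrumentor
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
from quart import Quart

app = Quart(__name__)

if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"):
    configure_azure_monitor()                 # exporter to Application Insights
    AioHttpClientInstrumentor().instrument()  # spans for outbound aiohttp requests
    app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)  # spans for app routes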